tags: remove another check for valid nodes...
Idan Kamara
r13909:184cf2fa default
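This changeset removes a now-redundant guard from tagslist(). The tags() map is built by _findtags(), which already drops tags that point to unknown nodes (see the "ignore tags to unknown nodes" lookup in the diff below), so every node tagslist() sees is guaranteed to resolve, and the try/except around changelog.rev() is dead code. For reference, a sketch of the method as it reads after this change (the inline comment is mine, not part of the source):

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            # n is known to be valid: _findtags() filtered unknown nodes
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]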
@@ -1,1937 +1,1934
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
-            try:
-                r = self.changelog.rev(n)
-            except error.LookupError:
-                r = -2 # sort to the beginning of the list if unknown
+            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

400 def nodetags(self, node):
397 def nodetags(self, node):
401 '''return the tags associated with a node'''
398 '''return the tags associated with a node'''
402 if not self.nodetagscache:
399 if not self.nodetagscache:
403 self.nodetagscache = {}
400 self.nodetagscache = {}
404 for t, n in self.tags().iteritems():
401 for t, n in self.tags().iteritems():
405 self.nodetagscache.setdefault(n, []).append(t)
402 self.nodetagscache.setdefault(n, []).append(t)
406 for tags in self.nodetagscache.itervalues():
403 for tags in self.nodetagscache.itervalues():
407 tags.sort()
404 tags.sort()
408 return self.nodetagscache.get(node, [])
405 return self.nodetagscache.get(node, [])
409
406
410 def nodebookmarks(self, node):
407 def nodebookmarks(self, node):
411 marks = []
408 marks = []
412 for bookmark, n in self._bookmarks.iteritems():
409 for bookmark, n in self._bookmarks.iteritems():
413 if n == node:
410 if n == node:
414 marks.append(bookmark)
411 marks.append(bookmark)
415 return sorted(marks)
412 return sorted(marks)
416
413
417 def _branchtags(self, partial, lrev):
414 def _branchtags(self, partial, lrev):
418 # TODO: rename this function?
415 # TODO: rename this function?
419 tiprev = len(self) - 1
416 tiprev = len(self) - 1
420 if lrev != tiprev:
417 if lrev != tiprev:
421 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
422 self._updatebranchcache(partial, ctxgen)
419 self._updatebranchcache(partial, ctxgen)
423 self._writebranchcache(partial, self.changelog.tip(), tiprev)
420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
424
421
425 return partial
422 return partial
426
423
427 def updatebranchcache(self):
424 def updatebranchcache(self):
428 tip = self.changelog.tip()
425 tip = self.changelog.tip()
429 if self._branchcache is not None and self._branchcachetip == tip:
426 if self._branchcache is not None and self._branchcachetip == tip:
430 return self._branchcache
427 return self._branchcache
431
428
432 oldtip = self._branchcachetip
429 oldtip = self._branchcachetip
433 self._branchcachetip = tip
430 self._branchcachetip = tip
434 if oldtip is None or oldtip not in self.changelog.nodemap:
431 if oldtip is None or oldtip not in self.changelog.nodemap:
435 partial, last, lrev = self._readbranchcache()
432 partial, last, lrev = self._readbranchcache()
436 else:
433 else:
437 lrev = self.changelog.rev(oldtip)
434 lrev = self.changelog.rev(oldtip)
438 partial = self._branchcache
435 partial = self._branchcache
439
436
440 self._branchtags(partial, lrev)
437 self._branchtags(partial, lrev)
441 # this private cache holds all heads (not just tips)
438 # this private cache holds all heads (not just tips)
442 self._branchcache = partial
439 self._branchcache = partial
443
440
444 def branchmap(self):
441 def branchmap(self):
445 '''returns a dictionary {branch: [branchheads]}'''
442 '''returns a dictionary {branch: [branchheads]}'''
446 self.updatebranchcache()
443 self.updatebranchcache()
447 return self._branchcache
444 return self._branchcache
448
445
449 def branchtags(self):
446 def branchtags(self):
450 '''return a dict where branch names map to the tipmost head of
447 '''return a dict where branch names map to the tipmost head of
451 the branch, open heads come before closed'''
448 the branch, open heads come before closed'''
452 bt = {}
449 bt = {}
453 for bn, heads in self.branchmap().iteritems():
450 for bn, heads in self.branchmap().iteritems():
454 tip = heads[-1]
451 tip = heads[-1]
455 for h in reversed(heads):
452 for h in reversed(heads):
456 if 'close' not in self.changelog.read(h)[5]:
453 if 'close' not in self.changelog.read(h)[5]:
457 tip = h
454 tip = h
458 break
455 break
459 bt[bn] = tip
456 bt[bn] = tip
460 return bt
457 return bt
461
458
462 def _readbranchcache(self):
459 def _readbranchcache(self):
463 partial = {}
460 partial = {}
464 try:
461 try:
465 f = self.opener("cache/branchheads")
462 f = self.opener("cache/branchheads")
466 lines = f.read().split('\n')
463 lines = f.read().split('\n')
467 f.close()
464 f.close()
468 except (IOError, OSError):
465 except (IOError, OSError):
469 return {}, nullid, nullrev
466 return {}, nullid, nullrev
470
467
471 try:
468 try:
472 last, lrev = lines.pop(0).split(" ", 1)
469 last, lrev = lines.pop(0).split(" ", 1)
473 last, lrev = bin(last), int(lrev)
470 last, lrev = bin(last), int(lrev)
474 if lrev >= len(self) or self[lrev].node() != last:
471 if lrev >= len(self) or self[lrev].node() != last:
475 # invalidate the cache
472 # invalidate the cache
476 raise ValueError('invalidating branch cache (tip differs)')
473 raise ValueError('invalidating branch cache (tip differs)')
477 for l in lines:
474 for l in lines:
478 if not l:
475 if not l:
479 continue
476 continue
480 node, label = l.split(" ", 1)
477 node, label = l.split(" ", 1)
481 label = encoding.tolocal(label.strip())
478 label = encoding.tolocal(label.strip())
482 partial.setdefault(label, []).append(bin(node))
479 partial.setdefault(label, []).append(bin(node))
483 except KeyboardInterrupt:
480 except KeyboardInterrupt:
484 raise
481 raise
485 except Exception, inst:
482 except Exception, inst:
486 if self.ui.debugflag:
483 if self.ui.debugflag:
487 self.ui.warn(str(inst), '\n')
484 self.ui.warn(str(inst), '\n')
488 partial, last, lrev = {}, nullid, nullrev
485 partial, last, lrev = {}, nullid, nullrev
489 return partial, last, lrev
486 return partial, last, lrev
490
487
491 def _writebranchcache(self, branches, tip, tiprev):
488 def _writebranchcache(self, branches, tip, tiprev):
492 try:
489 try:
493 f = self.opener("cache/branchheads", "w", atomictemp=True)
490 f = self.opener("cache/branchheads", "w", atomictemp=True)
494 f.write("%s %s\n" % (hex(tip), tiprev))
491 f.write("%s %s\n" % (hex(tip), tiprev))
495 for label, nodes in branches.iteritems():
492 for label, nodes in branches.iteritems():
496 for node in nodes:
493 for node in nodes:
497 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
498 f.rename()
495 f.rename()
499 except (IOError, OSError):
496 except (IOError, OSError):
500 pass
497 pass
501
498
502 def _updatebranchcache(self, partial, ctxgen):
499 def _updatebranchcache(self, partial, ctxgen):
503 # collect new branch entries
500 # collect new branch entries
504 newbranches = {}
501 newbranches = {}
505 for c in ctxgen:
502 for c in ctxgen:
506 newbranches.setdefault(c.branch(), []).append(c.node())
503 newbranches.setdefault(c.branch(), []).append(c.node())
507 # if older branchheads are reachable from new ones, they aren't
504 # if older branchheads are reachable from new ones, they aren't
508 # really branchheads. Note checking parents is insufficient:
505 # really branchheads. Note checking parents is insufficient:
509 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
506 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
510 for branch, newnodes in newbranches.iteritems():
507 for branch, newnodes in newbranches.iteritems():
511 bheads = partial.setdefault(branch, [])
508 bheads = partial.setdefault(branch, [])
512 bheads.extend(newnodes)
509 bheads.extend(newnodes)
513 if len(bheads) <= 1:
510 if len(bheads) <= 1:
514 continue
511 continue
515 # starting from tip means fewer passes over reachable
512 # starting from tip means fewer passes over reachable
516 while newnodes:
513 while newnodes:
517 latest = newnodes.pop()
514 latest = newnodes.pop()
518 if latest not in bheads:
515 if latest not in bheads:
519 continue
516 continue
520 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
517 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
521 reachable = self.changelog.reachable(latest, minbhrev)
518 reachable = self.changelog.reachable(latest, minbhrev)
522 reachable.remove(latest)
519 reachable.remove(latest)
523 bheads = [b for b in bheads if b not in reachable]
520 bheads = [b for b in bheads if b not in reachable]
524 partial[branch] = bheads
521 partial[branch] = bheads
525
522
526 def lookup(self, key):
523 def lookup(self, key):
527 if isinstance(key, int):
524 if isinstance(key, int):
528 return self.changelog.node(key)
525 return self.changelog.node(key)
529 elif key == '.':
526 elif key == '.':
530 return self.dirstate.p1()
527 return self.dirstate.p1()
531 elif key == 'null':
528 elif key == 'null':
532 return nullid
529 return nullid
533 elif key == 'tip':
530 elif key == 'tip':
534 return self.changelog.tip()
531 return self.changelog.tip()
535 n = self.changelog._match(key)
532 n = self.changelog._match(key)
536 if n:
533 if n:
537 return n
534 return n
538 if key in self._bookmarks:
535 if key in self._bookmarks:
539 return self._bookmarks[key]
536 return self._bookmarks[key]
540 if key in self.tags():
537 if key in self.tags():
541 return self.tags()[key]
538 return self.tags()[key]
542 if key in self.branchtags():
539 if key in self.branchtags():
543 return self.branchtags()[key]
540 return self.branchtags()[key]
544 n = self.changelog._partialmatch(key)
541 n = self.changelog._partialmatch(key)
545 if n:
542 if n:
546 return n
543 return n
547
544
548 # can't find key, check if it might have come from damaged dirstate
545 # can't find key, check if it might have come from damaged dirstate
549 if key in self.dirstate.parents():
546 if key in self.dirstate.parents():
550 raise error.Abort(_("working directory has unknown parent '%s'!")
547 raise error.Abort(_("working directory has unknown parent '%s'!")
551 % short(key))
548 % short(key))
552 try:
549 try:
553 if len(key) == 20:
550 if len(key) == 20:
554 key = hex(key)
551 key = hex(key)
555 except:
552 except:
556 pass
553 pass
557 raise error.RepoLookupError(_("unknown revision '%s'") % key)
554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
558
555
559 def lookupbranch(self, key, remote=None):
556 def lookupbranch(self, key, remote=None):
560 repo = remote or self
557 repo = remote or self
561 if key in repo.branchmap():
558 if key in repo.branchmap():
562 return key
559 return key
563
560
564 repo = (remote and remote.local()) and remote or self
561 repo = (remote and remote.local()) and remote or self
565 return repo[key].branch()
562 return repo[key].branch()
566
563
567 def known(self, nodes):
564 def known(self, nodes):
568 nm = self.changelog.nodemap
565 nm = self.changelog.nodemap
569 return [(n in nm) for n in nodes]
566 return [(n in nm) for n in nodes]
570
567
571 def local(self):
568 def local(self):
572 return True
569 return True
573
570
574 def join(self, f):
571 def join(self, f):
575 return os.path.join(self.path, f)
572 return os.path.join(self.path, f)
576
573
577 def wjoin(self, f):
574 def wjoin(self, f):
578 return os.path.join(self.root, f)
575 return os.path.join(self.root, f)
579
576
580 def file(self, f):
577 def file(self, f):
581 if f[0] == '/':
578 if f[0] == '/':
582 f = f[1:]
579 f = f[1:]
583 return filelog.filelog(self.sopener, f)
580 return filelog.filelog(self.sopener, f)
584
581
585 def changectx(self, changeid):
582 def changectx(self, changeid):
586 return self[changeid]
583 return self[changeid]
587
584
588 def parents(self, changeid=None):
585 def parents(self, changeid=None):
589 '''get list of changectxs for parents of changeid'''
586 '''get list of changectxs for parents of changeid'''
590 return self[changeid].parents()
587 return self[changeid].parents()
591
588
592 def filectx(self, path, changeid=None, fileid=None):
589 def filectx(self, path, changeid=None, fileid=None):
593 """changeid can be a changeset revision, node, or tag.
590 """changeid can be a changeset revision, node, or tag.
594 fileid can be a file revision or node."""
591 fileid can be a file revision or node."""
595 return context.filectx(self, path, changeid, fileid)
592 return context.filectx(self, path, changeid, fileid)
596
593
597 def getcwd(self):
594 def getcwd(self):
598 return self.dirstate.getcwd()
595 return self.dirstate.getcwd()
599
596
600 def pathto(self, f, cwd=None):
597 def pathto(self, f, cwd=None):
601 return self.dirstate.pathto(f, cwd)
598 return self.dirstate.pathto(f, cwd)
602
599
603 def wfile(self, f, mode='r'):
600 def wfile(self, f, mode='r'):
604 return self.wopener(f, mode)
601 return self.wopener(f, mode)
605
602
606 def _link(self, f):
603 def _link(self, f):
607 return os.path.islink(self.wjoin(f))
604 return os.path.islink(self.wjoin(f))
608
605
609 def _loadfilter(self, filter):
606 def _loadfilter(self, filter):
610 if filter not in self.filterpats:
607 if filter not in self.filterpats:
611 l = []
608 l = []
612 for pat, cmd in self.ui.configitems(filter):
609 for pat, cmd in self.ui.configitems(filter):
613 if cmd == '!':
610 if cmd == '!':
614 continue
611 continue
615 mf = matchmod.match(self.root, '', [pat])
612 mf = matchmod.match(self.root, '', [pat])
616 fn = None
613 fn = None
617 params = cmd
614 params = cmd
618 for name, filterfn in self._datafilters.iteritems():
615 for name, filterfn in self._datafilters.iteritems():
619 if cmd.startswith(name):
616 if cmd.startswith(name):
620 fn = filterfn
617 fn = filterfn
621 params = cmd[len(name):].lstrip()
618 params = cmd[len(name):].lstrip()
622 break
619 break
623 if not fn:
620 if not fn:
624 fn = lambda s, c, **kwargs: util.filter(s, c)
621 fn = lambda s, c, **kwargs: util.filter(s, c)
625 # Wrap old filters not supporting keyword arguments
622 # Wrap old filters not supporting keyword arguments
626 if not inspect.getargspec(fn)[2]:
623 if not inspect.getargspec(fn)[2]:
627 oldfn = fn
624 oldfn = fn
628 fn = lambda s, c, **kwargs: oldfn(s, c)
625 fn = lambda s, c, **kwargs: oldfn(s, c)
629 l.append((mf, fn, params))
626 l.append((mf, fn, params))
630 self.filterpats[filter] = l
627 self.filterpats[filter] = l
631 return self.filterpats[filter]
628 return self.filterpats[filter]
632
629
633 def _filter(self, filterpats, filename, data):
630 def _filter(self, filterpats, filename, data):
634 for mf, fn, cmd in filterpats:
631 for mf, fn, cmd in filterpats:
635 if mf(filename):
632 if mf(filename):
636 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
637 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
638 break
635 break
639
636
640 return data
637 return data
641
638
642 @propertycache
639 @propertycache
643 def _encodefilterpats(self):
640 def _encodefilterpats(self):
644 return self._loadfilter('encode')
641 return self._loadfilter('encode')
645
642
646 @propertycache
643 @propertycache
647 def _decodefilterpats(self):
644 def _decodefilterpats(self):
648 return self._loadfilter('decode')
645 return self._loadfilter('decode')
649
646
650 def adddatafilter(self, name, filter):
647 def adddatafilter(self, name, filter):
651 self._datafilters[name] = filter
648 self._datafilters[name] = filter
652
649
653 def wread(self, filename):
650 def wread(self, filename):
654 if self._link(filename):
651 if self._link(filename):
655 data = os.readlink(self.wjoin(filename))
652 data = os.readlink(self.wjoin(filename))
656 else:
653 else:
657 data = self.wopener(filename, 'r').read()
654 data = self.wopener(filename, 'r').read()
658 return self._filter(self._encodefilterpats, filename, data)
655 return self._filter(self._encodefilterpats, filename, data)
659
656
660 def wwrite(self, filename, data, flags):
657 def wwrite(self, filename, data, flags):
661 data = self._filter(self._decodefilterpats, filename, data)
658 data = self._filter(self._decodefilterpats, filename, data)
662 if 'l' in flags:
659 if 'l' in flags:
663 self.wopener.symlink(data, filename)
660 self.wopener.symlink(data, filename)
664 else:
661 else:
665 self.wopener(filename, 'w').write(data)
662 self.wopener(filename, 'w').write(data)
666 if 'x' in flags:
663 if 'x' in flags:
667 util.set_flags(self.wjoin(filename), False, True)
664 util.set_flags(self.wjoin(filename), False, True)
668
665
669 def wwritedata(self, filename, data):
666 def wwritedata(self, filename, data):
670 return self._filter(self._decodefilterpats, filename, data)
667 return self._filter(self._decodefilterpats, filename, data)
671
668
672 def transaction(self, desc):
669 def transaction(self, desc):
673 tr = self._transref and self._transref() or None
670 tr = self._transref and self._transref() or None
674 if tr and tr.running():
671 if tr and tr.running():
675 return tr.nest()
672 return tr.nest()
676
673
677 # abort here if the journal already exists
674 # abort here if the journal already exists
678 if os.path.exists(self.sjoin("journal")):
675 if os.path.exists(self.sjoin("journal")):
679 raise error.RepoError(
676 raise error.RepoError(
680 _("abandoned transaction found - run hg recover"))
677 _("abandoned transaction found - run hg recover"))
681
678
682 # save dirstate for rollback
679 # save dirstate for rollback
683 try:
680 try:
684 ds = self.opener("dirstate").read()
681 ds = self.opener("dirstate").read()
685 except IOError:
682 except IOError:
686 ds = ""
683 ds = ""
687 self.opener("journal.dirstate", "w").write(ds)
684 self.opener("journal.dirstate", "w").write(ds)
688 self.opener("journal.branch", "w").write(
685 self.opener("journal.branch", "w").write(
689 encoding.fromlocal(self.dirstate.branch()))
686 encoding.fromlocal(self.dirstate.branch()))
690 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
687 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
691
688
692 renames = [(self.sjoin("journal"), self.sjoin("undo")),
689 renames = [(self.sjoin("journal"), self.sjoin("undo")),
693 (self.join("journal.dirstate"), self.join("undo.dirstate")),
690 (self.join("journal.dirstate"), self.join("undo.dirstate")),
694 (self.join("journal.branch"), self.join("undo.branch")),
691 (self.join("journal.branch"), self.join("undo.branch")),
695 (self.join("journal.desc"), self.join("undo.desc"))]
692 (self.join("journal.desc"), self.join("undo.desc"))]
696 tr = transaction.transaction(self.ui.warn, self.sopener,
693 tr = transaction.transaction(self.ui.warn, self.sopener,
697 self.sjoin("journal"),
694 self.sjoin("journal"),
698 aftertrans(renames),
695 aftertrans(renames),
699 self.store.createmode)
696 self.store.createmode)
700 self._transref = weakref.ref(tr)
697 self._transref = weakref.ref(tr)
701 return tr
698 return tr
702
699
703 def recover(self):
700 def recover(self):
704 lock = self.lock()
701 lock = self.lock()
705 try:
702 try:
706 if os.path.exists(self.sjoin("journal")):
703 if os.path.exists(self.sjoin("journal")):
707 self.ui.status(_("rolling back interrupted transaction\n"))
704 self.ui.status(_("rolling back interrupted transaction\n"))
708 transaction.rollback(self.sopener, self.sjoin("journal"),
705 transaction.rollback(self.sopener, self.sjoin("journal"),
709 self.ui.warn)
706 self.ui.warn)
710 self.invalidate()
707 self.invalidate()
711 return True
708 return True
712 else:
709 else:
713 self.ui.warn(_("no interrupted transaction available\n"))
710 self.ui.warn(_("no interrupted transaction available\n"))
714 return False
711 return False
715 finally:
712 finally:
716 lock.release()
713 lock.release()
717
714
718 def rollback(self, dryrun=False):
715 def rollback(self, dryrun=False):
719 wlock = lock = None
716 wlock = lock = None
720 try:
717 try:
721 wlock = self.wlock()
718 wlock = self.wlock()
722 lock = self.lock()
719 lock = self.lock()
723 if os.path.exists(self.sjoin("undo")):
720 if os.path.exists(self.sjoin("undo")):
724 try:
721 try:
725 args = self.opener("undo.desc", "r").read().splitlines()
722 args = self.opener("undo.desc", "r").read().splitlines()
726 if len(args) >= 3 and self.ui.verbose:
723 if len(args) >= 3 and self.ui.verbose:
727 desc = _("repository tip rolled back to revision %s"
724 desc = _("repository tip rolled back to revision %s"
728 " (undo %s: %s)\n") % (
725 " (undo %s: %s)\n") % (
729 int(args[0]) - 1, args[1], args[2])
726 int(args[0]) - 1, args[1], args[2])
730 elif len(args) >= 2:
727 elif len(args) >= 2:
731 desc = _("repository tip rolled back to revision %s"
728 desc = _("repository tip rolled back to revision %s"
732 " (undo %s)\n") % (
729 " (undo %s)\n") % (
733 int(args[0]) - 1, args[1])
730 int(args[0]) - 1, args[1])
734 except IOError:
731 except IOError:
735 desc = _("rolling back unknown transaction\n")
732 desc = _("rolling back unknown transaction\n")
736 self.ui.status(desc)
733 self.ui.status(desc)
737 if dryrun:
734 if dryrun:
738 return
735 return
739 transaction.rollback(self.sopener, self.sjoin("undo"),
736 transaction.rollback(self.sopener, self.sjoin("undo"),
740 self.ui.warn)
737 self.ui.warn)
741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
738 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
742 if os.path.exists(self.join('undo.bookmarks')):
739 if os.path.exists(self.join('undo.bookmarks')):
743 util.rename(self.join('undo.bookmarks'),
740 util.rename(self.join('undo.bookmarks'),
744 self.join('bookmarks'))
741 self.join('bookmarks'))
745 try:
742 try:
746 branch = self.opener("undo.branch").read()
743 branch = self.opener("undo.branch").read()
747 self.dirstate.setbranch(branch)
744 self.dirstate.setbranch(branch)
748 except IOError:
745 except IOError:
749 self.ui.warn(_("Named branch could not be reset, "
746 self.ui.warn(_("Named branch could not be reset, "
750 "current branch still is: %s\n")
747 "current branch still is: %s\n")
751 % self.dirstate.branch())
748 % self.dirstate.branch())
752 self.invalidate()
749 self.invalidate()
753 self.dirstate.invalidate()
750 self.dirstate.invalidate()
754 self.destroyed()
751 self.destroyed()
755 parents = tuple([p.rev() for p in self.parents()])
752 parents = tuple([p.rev() for p in self.parents()])
756 if len(parents) > 1:
753 if len(parents) > 1:
757 self.ui.status(_("working directory now based on "
754 self.ui.status(_("working directory now based on "
758 "revisions %d and %d\n") % parents)
755 "revisions %d and %d\n") % parents)
759 else:
756 else:
760 self.ui.status(_("working directory now based on "
757 self.ui.status(_("working directory now based on "
761 "revision %d\n") % parents)
758 "revision %d\n") % parents)
762 else:
759 else:
763 self.ui.warn(_("no rollback information available\n"))
760 self.ui.warn(_("no rollback information available\n"))
764 return 1
761 return 1
765 finally:
762 finally:
766 release(lock, wlock)
763 release(lock, wlock)
767
764
768 def invalidatecaches(self):
765 def invalidatecaches(self):
769 self._tags = None
766 self._tags = None
770 self._tagtypes = None
767 self._tagtypes = None
771 self.nodetagscache = None
768 self.nodetagscache = None
772 self._branchcache = None # in UTF-8
769 self._branchcache = None # in UTF-8
773 self._branchcachetip = None
770 self._branchcachetip = None
774
771
775 def invalidate(self):
772 def invalidate(self):
776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
777 if a in self.__dict__:
774 if a in self.__dict__:
778 delattr(self, a)
775 delattr(self, a)
779 self.invalidatecaches()
776 self.invalidatecaches()
780
777
781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
778 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
782 try:
779 try:
783 l = lock.lock(lockname, 0, releasefn, desc=desc)
780 l = lock.lock(lockname, 0, releasefn, desc=desc)
784 except error.LockHeld, inst:
781 except error.LockHeld, inst:
785 if not wait:
782 if not wait:
786 raise
783 raise
787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
784 self.ui.warn(_("waiting for lock on %s held by %r\n") %
788 (desc, inst.locker))
785 (desc, inst.locker))
789 # default to 600 seconds timeout
786 # default to 600 seconds timeout
790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
787 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
791 releasefn, desc=desc)
788 releasefn, desc=desc)
792 if acquirefn:
789 if acquirefn:
793 acquirefn()
790 acquirefn()
794 return l
791 return l
795
792
796 def lock(self, wait=True):
793 def lock(self, wait=True):
797 '''Lock the repository store (.hg/store) and return a weak reference
794 '''Lock the repository store (.hg/store) and return a weak reference
798 to the lock. Use this before modifying the store (e.g. committing or
795 to the lock. Use this before modifying the store (e.g. committing or
799 stripping). If you are opening a transaction, get a lock as well.)'''
796 stripping). If you are opening a transaction, get a lock as well.)'''
800 l = self._lockref and self._lockref()
797 l = self._lockref and self._lockref()
801 if l is not None and l.held:
798 if l is not None and l.held:
802 l.lock()
799 l.lock()
803 return l
800 return l
804
801
805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
806 self.invalidate, _('repository %s') % self.origroot)
803 self.invalidate, _('repository %s') % self.origroot)
807 self._lockref = weakref.ref(l)
804 self._lockref = weakref.ref(l)
808 return l
805 return l
809
806
810 def wlock(self, wait=True):
807 def wlock(self, wait=True):
811 '''Lock the non-store parts of the repository (everything under
808 '''Lock the non-store parts of the repository (everything under
812 .hg except .hg/store) and return a weak reference to the lock.
809 .hg except .hg/store) and return a weak reference to the lock.
813 Use this before modifying files in .hg.'''
810 Use this before modifying files in .hg.'''
814 l = self._wlockref and self._wlockref()
811 l = self._wlockref and self._wlockref()
815 if l is not None and l.held:
812 if l is not None and l.held:
816 l.lock()
813 l.lock()
817 return l
814 return l
818
815
819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
820 self.dirstate.invalidate, _('working directory of %s') %
817 self.dirstate.invalidate, _('working directory of %s') %
821 self.origroot)
818 self.origroot)
822 self._wlockref = weakref.ref(l)
819 self._wlockref = weakref.ref(l)
823 return l
820 return l
824
821
825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
822 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
826 """
823 """
827 commit an individual file as part of a larger transaction
824 commit an individual file as part of a larger transaction
828 """
825 """
829
826
830 fname = fctx.path()
827 fname = fctx.path()
831 text = fctx.data()
828 text = fctx.data()
832 flog = self.file(fname)
829 flog = self.file(fname)
833 fparent1 = manifest1.get(fname, nullid)
830 fparent1 = manifest1.get(fname, nullid)
834 fparent2 = fparent2o = manifest2.get(fname, nullid)
831 fparent2 = fparent2o = manifest2.get(fname, nullid)
835
832
836 meta = {}
833 meta = {}
837 copy = fctx.renamed()
834 copy = fctx.renamed()
838 if copy and copy[0] != fname:
835 if copy and copy[0] != fname:
839 # Mark the new revision of this file as a copy of another
836 # Mark the new revision of this file as a copy of another
840 # file. This copy data will effectively act as a parent
837 # file. This copy data will effectively act as a parent
841 # of this new revision. If this is a merge, the first
838 # of this new revision. If this is a merge, the first
842 # parent will be the nullid (meaning "look up the copy data")
839 # parent will be the nullid (meaning "look up the copy data")
843 # and the second one will be the other parent. For example:
840 # and the second one will be the other parent. For example:
844 #
841 #
845 # 0 --- 1 --- 3 rev1 changes file foo
842 # 0 --- 1 --- 3 rev1 changes file foo
846 # \ / rev2 renames foo to bar and changes it
843 # \ / rev2 renames foo to bar and changes it
847 # \- 2 -/ rev3 should have bar with all changes and
844 # \- 2 -/ rev3 should have bar with all changes and
848 # should record that bar descends from
845 # should record that bar descends from
849 # bar in rev2 and foo in rev1
846 # bar in rev2 and foo in rev1
850 #
847 #
851 # this allows this merge to succeed:
848 # this allows this merge to succeed:
852 #
849 #
853 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
850 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
854 # \ / merging rev3 and rev4 should use bar@rev2
851 # \ / merging rev3 and rev4 should use bar@rev2
855 # \- 2 --- 4 as the merge base
852 # \- 2 --- 4 as the merge base
856 #
853 #
857
854
858 cfname = copy[0]
855 cfname = copy[0]
859 crev = manifest1.get(cfname)
856 crev = manifest1.get(cfname)
860 newfparent = fparent2
857 newfparent = fparent2
861
858
862 if manifest2: # branch merge
859 if manifest2: # branch merge
863 if fparent2 == nullid or crev is None: # copied on remote side
860 if fparent2 == nullid or crev is None: # copied on remote side
864 if cfname in manifest2:
861 if cfname in manifest2:
865 crev = manifest2[cfname]
862 crev = manifest2[cfname]
866 newfparent = fparent1
863 newfparent = fparent1
867
864
868 # find source in nearest ancestor if we've lost track
865 # find source in nearest ancestor if we've lost track
869 if not crev:
866 if not crev:
870 self.ui.debug(" %s: searching for copy revision for %s\n" %
867 self.ui.debug(" %s: searching for copy revision for %s\n" %
871 (fname, cfname))
868 (fname, cfname))
872 for ancestor in self[None].ancestors():
869 for ancestor in self[None].ancestors():
873 if cfname in ancestor:
870 if cfname in ancestor:
874 crev = ancestor[cfname].filenode()
871 crev = ancestor[cfname].filenode()
875 break
872 break
876
873
877 if crev:
874 if crev:
878 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
875 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
879 meta["copy"] = cfname
876 meta["copy"] = cfname
880 meta["copyrev"] = hex(crev)
877 meta["copyrev"] = hex(crev)
881 fparent1, fparent2 = nullid, newfparent
878 fparent1, fparent2 = nullid, newfparent
882 else:
879 else:
883 self.ui.warn(_("warning: can't find ancestor for '%s' "
880 self.ui.warn(_("warning: can't find ancestor for '%s' "
884 "copied from '%s'!\n") % (fname, cfname))
881 "copied from '%s'!\n") % (fname, cfname))
885
882
886 elif fparent2 != nullid:
883 elif fparent2 != nullid:
887 # is one parent an ancestor of the other?
884 # is one parent an ancestor of the other?
888 fparentancestor = flog.ancestor(fparent1, fparent2)
885 fparentancestor = flog.ancestor(fparent1, fparent2)
889 if fparentancestor == fparent1:
886 if fparentancestor == fparent1:
890 fparent1, fparent2 = fparent2, nullid
887 fparent1, fparent2 = fparent2, nullid
891 elif fparentancestor == fparent2:
888 elif fparentancestor == fparent2:
892 fparent2 = nullid
889 fparent2 = nullid
893
890
894 # is the file changed?
891 # is the file changed?
895 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
892 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
896 changelist.append(fname)
893 changelist.append(fname)
897 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
894 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
898
895
899 # are just the flags changed during merge?
896 # are just the flags changed during merge?
900 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
897 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
901 changelist.append(fname)
898 changelist.append(fname)
902
899
903 return fparent1
900 return fparent1
904
901
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

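    # A minimal usage sketch (hypothetical caller; the pattern list and
    # user name are assumptions, not part of this module):
    #   m = matchmod.match(repo.root, '', ['foo.py'])
    #   node = repo.commit(text="fix foo", user="alice", match=m)
    #   # node is the new changeset node, or None if nothing changed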
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

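    # Note that commit() above is the high-level entry point: it builds a
    # workingctx and hands it to commitctx(), which checks files in,
    # writes the manifest, and finally appends the changelog entry, all
    # inside a single transaction.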
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

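    # For example, listing every tracked file in the working directory
    # (hypothetical caller):
    #   for f in repo.walk(matchmod.always(repo.root, '')):
    #       print f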
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or a node and the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

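    # The returned 7-tuple unpacks as (hypothetical caller):
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    # The ignored/clean/unknown lists stay empty unless requested.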
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

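    # between() samples the first-parent ancestry of each 'top' at
    # exponentially growing distances: i counts steps and f doubles after
    # each recorded node, so the returned nodes sit 1, 2, 4, 8, ... steps
    # from top. This is what lets the older (pre-getbundle) discovery
    # protocol narrow down the top..bottom range in a logarithmic number
    # of round trips.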
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            usecommon = remote.capable('getbundle')
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force, commononly=usecommon)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if usecommon:
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

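    # In short, pull() prefers the getbundle wire command when the remote
    # advertises it, and otherwise falls back to changegroup() for a full
    # pull or changegroupsubset() for a partial one against older servers.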
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

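    # The generated bundle is therefore three back-to-back streams of
    # delta groups: one for the changelog, one for the manifest, and then
    # one group per changed file (each preceded by a fileheader chunk),
    # with bundler.close() marking the end of the stream.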
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1663 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1660 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1664 """Add the changegroup returned by source.read() to this repo.
1661 """Add the changegroup returned by source.read() to this repo.
1665 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1662 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1666 the URL of the repo where this changegroup is coming from.
1663 the URL of the repo where this changegroup is coming from.
1667 If lock is not None, the function takes ownership of the lock
1664 If lock is not None, the function takes ownership of the lock
1668 and releases it after the changegroup is added.
1665 and releases it after the changegroup is added.
1669
1666
1670 Return an integer summarizing the change to this repo:
1667 Return an integer summarizing the change to this repo:
1671 - nothing changed or no source: 0
1668 - nothing changed or no source: 0
1672 - more heads than before: 1+added heads (2..n)
1669 - more heads than before: 1+added heads (2..n)
1673 - fewer heads than before: -1-removed heads (-2..-n)
1670 - fewer heads than before: -1-removed heads (-2..-n)
1674 - number of heads stays the same: 1
1671 - number of heads stays the same: 1
1675 """
1672 """
1676 def csmap(x):
1673 def csmap(x):
1677 self.ui.debug("add changeset %s\n" % short(x))
1674 self.ui.debug("add changeset %s\n" % short(x))
1678 return len(cl)
1675 return len(cl)
1679
1676
1680 def revmap(x):
1677 def revmap(x):
1681 return cl.rev(x)
1678 return cl.rev(x)
1682
1679
1683 if not source:
1680 if not source:
1684 return 0
1681 return 0
1685
1682
1686 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1683 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1687
1684
1688 changesets = files = revisions = 0
1685 changesets = files = revisions = 0
1689 efiles = set()
1686 efiles = set()
1690
1687
1691 # write changelog data to temp files so concurrent readers will not see
1688 # write changelog data to temp files so concurrent readers will not see
1692 # inconsistent view
1689 # inconsistent view
1693 cl = self.changelog
1690 cl = self.changelog
1694 cl.delayupdate()
1691 cl.delayupdate()
1695 oldheads = len(cl.heads())
1692 oldheads = len(cl.heads())
1696
1693
1697 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1694 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1698 try:
1695 try:
1699 trp = weakref.proxy(tr)
1696 trp = weakref.proxy(tr)
1700 # pull off the changeset group
1697 # pull off the changeset group
1701 self.ui.status(_("adding changesets\n"))
1698 self.ui.status(_("adding changesets\n"))
1702 clstart = len(cl)
1699 clstart = len(cl)
1703 class prog(object):
1700 class prog(object):
1704 step = _('changesets')
1701 step = _('changesets')
1705 count = 1
1702 count = 1
1706 ui = self.ui
1703 ui = self.ui
1707 total = None
1704 total = None
1708 def __call__(self):
1705 def __call__(self):
1709 self.ui.progress(self.step, self.count, unit=_('chunks'),
1706 self.ui.progress(self.step, self.count, unit=_('chunks'),
1710 total=self.total)
1707 total=self.total)
1711 self.count += 1
1708 self.count += 1
1712 pr = prog()
1709 pr = prog()
1713 source.callback = pr
1710 source.callback = pr
1714
1711
1715 if (cl.addgroup(source, csmap, trp) is None
1712 if (cl.addgroup(source, csmap, trp) is None
1716 and not emptyok):
1713 and not emptyok):
1717 raise util.Abort(_("received changelog group is empty"))
1714 raise util.Abort(_("received changelog group is empty"))
1718 clend = len(cl)
1715 clend = len(cl)
1719 changesets = clend - clstart
1716 changesets = clend - clstart
1720 for c in xrange(clstart, clend):
1717 for c in xrange(clstart, clend):
1721 efiles.update(self[c].files())
1718 efiles.update(self[c].files())
1722 efiles = len(efiles)
1719 efiles = len(efiles)
1723 self.ui.progress(_('changesets'), None)
1720 self.ui.progress(_('changesets'), None)
1724
1721
1725 # pull off the manifest group
1722 # pull off the manifest group
1726 self.ui.status(_("adding manifests\n"))
1723 self.ui.status(_("adding manifests\n"))
1727 pr.step = _('manifests')
1724 pr.step = _('manifests')
1728 pr.count = 1
1725 pr.count = 1
1729 pr.total = changesets # manifests <= changesets
1726 pr.total = changesets # manifests <= changesets
1730 # no need to check for empty manifest group here:
1727 # no need to check for empty manifest group here:
1731 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1732 # no new manifest will be created and the manifest group will
1729 # no new manifest will be created and the manifest group will
1733 # be empty during the pull
1730 # be empty during the pull
1734 self.manifest.addgroup(source, revmap, trp)
1731 self.manifest.addgroup(source, revmap, trp)
1735 self.ui.progress(_('manifests'), None)
1732 self.ui.progress(_('manifests'), None)
1736
1733
1737 needfiles = {}
1734 needfiles = {}
1738 if self.ui.configbool('server', 'validate', default=False):
1735 if self.ui.configbool('server', 'validate', default=False):
1739 # validate incoming csets have their manifests
1736 # validate incoming csets have their manifests
1740 for cset in xrange(clstart, clend):
1737 for cset in xrange(clstart, clend):
1741 mfest = self.changelog.read(self.changelog.node(cset))[0]
1738 mfest = self.changelog.read(self.changelog.node(cset))[0]
1742 mfest = self.manifest.readdelta(mfest)
1739 mfest = self.manifest.readdelta(mfest)
1743 # store file nodes we must see
1740 # store file nodes we must see
1744 for f, n in mfest.iteritems():
1741 for f, n in mfest.iteritems():
1745 needfiles.setdefault(f, set()).add(n)
1742 needfiles.setdefault(f, set()).add(n)
1746
1743
1747 # process the files
1744 # process the files
1748 self.ui.status(_("adding file changes\n"))
1745 self.ui.status(_("adding file changes\n"))
1749 pr.step = 'files'
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
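        # Note on the convention above: callers read a return value of 0 as
        # "no changes", so the head delta is shifted away from zero. A few
        # worked cases (illustrative, not exhaustive):
        #   oldheads == newheads  -> returns 1 (changes added, no new heads)
        #   one head added        -> returns 2
        #   one head removed      -> returns -2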


    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
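    # The wire format consumed by stream_in, reconstructed from the parsing
    # above (a sketch; field names are illustrative):
    #
    #   <status>\n                     "0" ok, "1" forbidden, "2" lock failed
    #   <total_files> <total_bytes>\n  two base-10 integers
    #   then, <total_files> times:
    #   <store path>\0<size>\n         followed by exactly <size> raw bytes
    #
    # so a stream carrying one 4096-byte file might begin:
    #   "0\n1 4096\ndata/foo.i\x004096\n" + 4096 bytes of revlog data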

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
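    # Transport selection above, in short: the legacy 'stream' capability
    # implies plain revlogv1, while 'streamreqs' advertises the remote's
    # format requirements as a comma-separated list. For example (value
    # illustrative), a remote answering capable('streamreqs') with
    # "revlogv1,parentdelta" streams in here, since both formats are in
    # self.supportedformats; any requirement we do not recognize falls
    # back to pull().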

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

    def debugwireargs(self, one, two, three=None, four=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s" % (one, two, three, four)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
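# A hedged sketch of how aftertrans is typically wired up: the returned
# closure is handed to the transaction so that each (src, dest) pair is
# renamed, e.g. journal files into undo files, when the transaction
# completes (paths and the transaction signature here are illustrative,
# not the exact call made elsewhere in this module):
#
#   renames = [(sjoin('journal'), sjoin('undo'))]
#   tr = transaction.transaction(ui.warn, sopener, sjoin('journal'),
#                                aftertrans(renames))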

def instance(ui, path, create):
    return localrepository(ui, urlmod.localpath(path), create)

def islocal(path):
    return True
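# instance() and islocal() are the hooks through which mercurial.hg resolves
# a repository path to this module. A minimal usage sketch (assuming a ui
# object from mercurial.ui; the path is illustrative):
#
#   from mercurial import ui as uimod
#   repo = instance(uimod.ui(), '/path/to/repo', False)
#   assert islocal('/path/to/repo')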