localrepo: check nested repos against working directory...
Martin Geisler
r12174:7bccd042 default
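
The change (the marked hunk below) makes _checknested consult the working
directory itself, self[None], instead of the working directory's parent
revision, self['.']: the path auditor should reject writes into a nested
repository as it exists on disk *now*, including one introduced by an edited
but uncommitted .hgsub. A minimal sketch of the difference, assuming a
checkout where sub/ was added to .hgsub but not yet committed (the repository
path is hypothetical):

    from mercurial import ui, hg

    repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path

    # repo['.'] is the changectx of the working directory's parent
    # revision; its substate reflects only the committed .hgsub.
    print 'sub' in repo['.'].substate   # False: not committed yet
    # repo[None] is the workingctx; its substate also sees the
    # uncommitted .hgsub entry.
    print 'sub' in repo[None].substate  # True: visible on disk now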
@@ -1,1864 +1,1864 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supported = set('revlogv1 store fncache shared parentdelta'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
-        # All in all, checking against the working copy parent
-        # revision seems sensible since we want to prevent access to
-        # nested repositories on the filesystem *now*.
-        ctx = self['.']
+        # All in all, checking against the working copy seems sensible
+        # since we want to prevent access to nested repositories on
+        # the filesystem *now*.
+        ctx = self[None]
        parts = util.splitpath(subpath)
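        # Walk prefixes of subpath from longest to shortest; e.g. for a
        # hypothetical subpath 'sub/deep/x' the loop tries 'sub/deep/x',
        # then 'sub/deep', then 'sub'. The first prefix found in
        # ctx.substate either equals subpath (a legal nested repo) or
        # delegates the rest of the path to that subrepo's checknested().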
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False


    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''
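        # Example call (hypothetical values): tag the current tip
        # globally, which commits a new changeset updating .hgtags:
        #   repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0',
        #            False, 'me <me@example.com>', None)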

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

    def _filter(self, filter, filename, data):
        self._loadfilter(filter)

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
839
839
840 def fail(f, msg):
840 def fail(f, msg):
841 raise util.Abort('%s: %s' % (f, msg))
841 raise util.Abort('%s: %s' % (f, msg))
842
842
843 if not match:
843 if not match:
844 match = matchmod.always(self.root, '')
844 match = matchmod.always(self.root, '')
845
845
846 if not force:
846 if not force:
847 vdirs = []
847 vdirs = []
848 match.dir = vdirs.append
848 match.dir = vdirs.append
849 match.bad = fail
849 match.bad = fail
850
850
851 wlock = self.wlock()
851 wlock = self.wlock()
852 try:
852 try:
853 wctx = self[None]
853 wctx = self[None]
854 merge = len(wctx.parents()) > 1
854 merge = len(wctx.parents()) > 1
855
855
856 if (not force and merge and match and
            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
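        # Illustrative sketch: walking the working directory with an
        # always-matcher (see status() below for the same construction):
        #   m = matchmod.always(self.root, self.getcwd())
        #   for f in self.walk(m):
        #       ...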
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes, or between a node
        and the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath in ctx1.substate:
                sub = ctx1.sub(subpath)
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
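        # For each (top, bottom) pair this samples the first-parent chain
        # at exponentially growing distances from top (1, 2, 4, 8, ...):
        # a node is recorded whenever its distance i reaches the next
        # power of two f.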
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
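        # Sketch of interpreting the return value (illustrative only):
        #   ret = repo.push(other)
        #   # ret == 0: error or nothing to push; ret == 1: remote head
        #   # count unchanged, or push refused; anything else follows
        #   # addchangegroup()'s head-count convention.
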
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
          - nothing changed or no source: 0
          - more heads than before: 1+added heads (2..n)
          - fewer heads than before: -1-removed heads (-2..-n)
          - number of heads stays the same: 1
        """
1629 def csmap(x):
1629 def csmap(x):
1630 self.ui.debug("add changeset %s\n" % short(x))
1630 self.ui.debug("add changeset %s\n" % short(x))
1631 return len(cl)
1631 return len(cl)
1632
1632
1633 def revmap(x):
1633 def revmap(x):
1634 return cl.rev(x)
1634 return cl.rev(x)
1635
1635
1636 if not source:
1636 if not source:
1637 return 0
1637 return 0
1638
1638
1639 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1639 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1640
1640
1641 changesets = files = revisions = 0
1641 changesets = files = revisions = 0
1642 efiles = set()
1642 efiles = set()
1643
1643
1644 # write changelog data to temp files so concurrent readers will not see
1644 # write changelog data to temp files so concurrent readers will not see
1645 # inconsistent view
1645 # inconsistent view
1646 cl = self.changelog
1646 cl = self.changelog
1647 cl.delayupdate()
1647 cl.delayupdate()
1648 oldheads = len(cl.heads())
1648 oldheads = len(cl.heads())
1649
1649
1650 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1650 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1651 try:
1651 try:
1652 trp = weakref.proxy(tr)
1652 trp = weakref.proxy(tr)
1653 # pull off the changeset group
1653 # pull off the changeset group
1654 self.ui.status(_("adding changesets\n"))
1654 self.ui.status(_("adding changesets\n"))
1655 clstart = len(cl)
1655 clstart = len(cl)
1656 class prog(object):
1656 class prog(object):
1657 step = _('changesets')
1657 step = _('changesets')
1658 count = 1
1658 count = 1
1659 ui = self.ui
1659 ui = self.ui
1660 total = None
1660 total = None
1661 def __call__(self):
1661 def __call__(self):
1662 self.ui.progress(self.step, self.count, unit=_('chunks'),
1662 self.ui.progress(self.step, self.count, unit=_('chunks'),
1663 total=self.total)
1663 total=self.total)
1664 self.count += 1
1664 self.count += 1
1665 pr = prog()
1665 pr = prog()
1666 chunkiter = changegroup.chunkiter(source, progress=pr)
1666 chunkiter = changegroup.chunkiter(source, progress=pr)
1667 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1667 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1668 raise util.Abort(_("received changelog group is empty"))
1668 raise util.Abort(_("received changelog group is empty"))
1669 clend = len(cl)
1669 clend = len(cl)
1670 changesets = clend - clstart
1670 changesets = clend - clstart
1671 for c in xrange(clstart, clend):
1671 for c in xrange(clstart, clend):
1672 efiles.update(self[c].files())
1672 efiles.update(self[c].files())
1673 efiles = len(efiles)
1673 efiles = len(efiles)
1674 self.ui.progress(_('changesets'), None)
1674 self.ui.progress(_('changesets'), None)
1675
1675
1676 # pull off the manifest group
1676 # pull off the manifest group
1677 self.ui.status(_("adding manifests\n"))
1677 self.ui.status(_("adding manifests\n"))
1678 pr.step = _('manifests')
1678 pr.step = _('manifests')
1679 pr.count = 1
1679 pr.count = 1
1680 pr.total = changesets # manifests <= changesets
1680 pr.total = changesets # manifests <= changesets
1681 chunkiter = changegroup.chunkiter(source, progress=pr)
1681 chunkiter = changegroup.chunkiter(source, progress=pr)
1682 # no need to check for empty manifest group here:
1682 # no need to check for empty manifest group here:
1683 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1683 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1684 # no new manifest will be created and the manifest group will
1684 # no new manifest will be created and the manifest group will
1685 # be empty during the pull
1685 # be empty during the pull
1686 self.manifest.addgroup(chunkiter, revmap, trp)
1686 self.manifest.addgroup(chunkiter, revmap, trp)
1687 self.ui.progress(_('manifests'), None)
1687 self.ui.progress(_('manifests'), None)
1688
1688
1689 needfiles = {}
1689 needfiles = {}
1690 if self.ui.configbool('server', 'validate', default=False):
1690 if self.ui.configbool('server', 'validate', default=False):
1691 # validate incoming csets have their manifests
1691 # validate incoming csets have their manifests
1692 for cset in xrange(clstart, clend):
1692 for cset in xrange(clstart, clend):
1693 mfest = self.changelog.read(self.changelog.node(cset))[0]
1693 mfest = self.changelog.read(self.changelog.node(cset))[0]
1694 mfest = self.manifest.readdelta(mfest)
1694 mfest = self.manifest.readdelta(mfest)
1695 # store file nodes we must see
1695 # store file nodes we must see
1696 for f, n in mfest.iteritems():
1696 for f, n in mfest.iteritems():
1697 needfiles.setdefault(f, set()).add(n)
1697 needfiles.setdefault(f, set()).add(n)
1698
1698
1699 # process the files
1699 # process the files
1700 self.ui.status(_("adding file changes\n"))
1700 self.ui.status(_("adding file changes\n"))
1701 pr.step = 'files'
1701 pr.step = 'files'
1702 pr.count = 1
1702 pr.count = 1
1703 pr.total = efiles
1703 pr.total = efiles
1704 while 1:
1704 while 1:
1705 f = changegroup.getchunk(source)
1705 f = changegroup.getchunk(source)
1706 if not f:
1706 if not f:
1707 break
1707 break
1708 self.ui.debug("adding %s revisions\n" % f)
1708 self.ui.debug("adding %s revisions\n" % f)
1709 pr()
1709 pr()
1710 fl = self.file(f)
1710 fl = self.file(f)
1711 o = len(fl)
1711 o = len(fl)
1712 chunkiter = changegroup.chunkiter(source)
1712 chunkiter = changegroup.chunkiter(source)
1713 if fl.addgroup(chunkiter, revmap, trp) is None:
1713 if fl.addgroup(chunkiter, revmap, trp) is None:
1714 raise util.Abort(_("received file revlog group is empty"))
1714 raise util.Abort(_("received file revlog group is empty"))
1715 revisions += len(fl) - o
1715 revisions += len(fl) - o
1716 files += 1
1716 files += 1
1717 if f in needfiles:
1717 if f in needfiles:
1718 needs = needfiles[f]
1718 needs = needfiles[f]
1719 for new in xrange(o, len(fl)):
1719 for new in xrange(o, len(fl)):
1720 n = fl.node(new)
1720 n = fl.node(new)
1721 if n in needs:
1721 if n in needs:
1722 needs.remove(n)
1722 needs.remove(n)
1723 if not needs:
1723 if not needs:
1724 del needfiles[f]
1724 del needfiles[f]
1725 self.ui.progress(_('files'), None)
1725 self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
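        # Caller-side interpretation, as an illustrative sketch (the caller
        # and its messages here are hypothetical, not code from this file):
        # the result is never 0, so callers can distinguish "nothing added"
        # conventions; 1 means success with no change in head count, a value
        # above 1 means new heads appeared, and a negative value means the
        # head count dropped:
        #
        #   ret = repo.addchangegroup(source, 'pull', url)
        #   if ret > 1:
        #       ui.status("new heads appeared; consider merging\n")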


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
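    # The stream_out wire format consumed above, summarized as an
    # illustrative transcript (file names and sizes hypothetical):
    #
    #   0\n                        status line: 0 = ok, 1 = forbidden,
    #                              2 = could not lock, else unknown error
    #   2 4096\n                   total_files total_bytes
    #   data/foo.i\x001024\n       per file: name NUL size, then exactly
    #   <1024 raw bytes>           size bytes of raw store data
    #   ...                        repeated total_files times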

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
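    # Minimal usage sketch (hypothetical local names; assumes `remote` is
    # an already-opened peer repository):
    #
    #   dest = localrepository(ui, '/tmp/copy', create=1)
    #   dest.clone(remote, stream=True)  # falls back to pull() when heads
    #                                    # are given or the server lacks
    #                                    # the 'stream' capability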

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)
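    # Both methods defer to the pushkey module's registry of namespaces.
    # Illustrative sketch, assuming some namespace 'ns' has been registered
    # there (the namespace name and values are hypothetical):
    #
    #   repo.listkeys('ns')                  # -> {'key': 'value', ...}
    #   repo.pushkey('ns', 'key', '', 'v1')  # true-ish on success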

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
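# The closure above captures only plain (src, dest) tuples, never the
# repository object, so a transaction holding it as an after-close callback
# cannot keep the repo alive through a reference cycle. Usage sketch
# (paths hypothetical):
#
#   a = aftertrans([('.hg/journal', '.hg/undo')])
#   a()   # renames the journal to undo once the transaction is done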

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
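# Repo-opening code dispatches on URL scheme; plain paths and file:// URLs
# land here, with drop_scheme stripping the scheme prefix. Illustrative
# call (path hypothetical):
#
#   repo = instance(ui, 'file:///home/user/repo', False)
#   # equivalent to localrepository(ui, '/home/user/repo', False)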