##// END OF EJS Templates
localrepo: have _loadfilter return the loaded filter patterns
Nicolas Dumazet -
r12706:9ca08fbb default
parent child Browse files
Show More
@@ -1,1893 +1,1892 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
class localrepository(repo.repository):
    # protocol capabilities this repository class advertises to peers
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    # on-disk revlog format requirements that affect how data is stored
    supportedformats = set(('revlogv1', 'parentdelta'))
    # every requirement this class knows how to open, storage or otherwise
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
27
27
    def __init__(self, baseui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        Raises error.RepoError if path holds no repository (without
        create), already holds one (with create), or declares an
        on-disk requirement this version does not support.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)    # opens files under .hg/
        self.wopener = util.opener(self.root)   # opens working-copy files
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # layer per-repository config over the inherited one and load
            # any extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc is fine; keep the inherited configuration
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                # pick on-disk features from the 'format' config section
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                # a missing requires file means a pre-requirements repo;
                # any other I/O error is real and must propagate
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # honor .hg/sharedpath when present (shared-store support)
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}        # cache of encode/decode filter patterns
        self._datafilters = {}      # registered data filter functions
        self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path must be an absolute path under self.root; returns True only
        if the working copy's subrepo state declares it, either directly
        or via a nested subrepository.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # path itself is a registered subrepo
                    return True
                else:
                    # path lies inside a subrepo; delegate the remainder
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # drop the last component and retry with a shorter prefix
                parts.pop()
        return False
163
163
164
164
    @propertycache
    def changelog(self):
        """The changelog revlog, created lazily on first access."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a hook is running inside a transaction: if the pending data
            # belongs to this repository, also read the not-yet-committed
            # revisions from 00changelog.i.a
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 return dirstate.dirstate(self.opener, self.ui, self.root)
181 return dirstate.dirstate(self.opener, self.ui, self.root)
182
182
183 def __getitem__(self, changeid):
183 def __getitem__(self, changeid):
184 if changeid is None:
184 if changeid is None:
185 return context.workingctx(self)
185 return context.workingctx(self)
186 return context.changectx(self, changeid)
186 return context.changectx(self, changeid)
187
187
188 def __contains__(self, changeid):
188 def __contains__(self, changeid):
189 try:
189 try:
190 return bool(self.lookup(changeid))
190 return bool(self.lookup(changeid))
191 except error.RepoLookupError:
191 except error.RepoLookupError:
192 return False
192 return False
193
193
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True
196
196
    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)
199
199
200 def __iter__(self):
200 def __iter__(self):
201 for i in xrange(len(self)):
201 for i in xrange(len(self)):
202 yield i
202 yield i
203
203
204 def url(self):
204 def url(self):
205 return 'file:' + self.root
205 return 'file:' + self.root
206
206
    def hook(self, name, throw=False, **args):
        # delegate to the hook module, passing this repo's ui and any
        # extra keyword arguments straight through; throw is forwarded
        return hook.hook(self.ui, self, name, throw, **args)
209
209
    # characters that may never appear in a tag name (checked by _tag)
    tag_disallowed = ':\r\n'
211
211
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging helper used by tag().

        names may be a single string or a sequence of strings; node is
        the binary node being tagged.  With local=True the tags are
        appended to .hg/localtags and no commit is made; otherwise
        .hgtags is updated and committed, and the tag-commit node is
        returned.

        NOTE(review): extra={} is a mutable default argument; it is not
        mutated in this method, but it is handed to commit() -- confirm
        commit() never mutates it, or default to None instead.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        # reject names containing any forbidden character
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # pretag hooks may veto the operation (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at end of file, ensuring the previous
            # content ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    # record the old value first so the history of the
                    # tag is preserved in the file
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            # make sure the tags file itself is tracked
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
277
277
278 def tag(self, names, node, message, local, user, date):
278 def tag(self, names, node, message, local, user, date):
279 '''tag a revision with one or more symbolic names.
279 '''tag a revision with one or more symbolic names.
280
280
281 names is a list of strings or, when adding a single tag, names may be a
281 names is a list of strings or, when adding a single tag, names may be a
282 string.
282 string.
283
283
284 if local is True, the tags are stored in a per-repository file.
284 if local is True, the tags are stored in a per-repository file.
285 otherwise, they are stored in the .hgtags file, and a new
285 otherwise, they are stored in the .hgtags file, and a new
286 changeset is committed with the change.
286 changeset is committed with the change.
287
287
288 keyword arguments:
288 keyword arguments:
289
289
290 local: whether to store tags in non-version-controlled file
290 local: whether to store tags in non-version-controlled file
291 (default False)
291 (default False)
292
292
293 message: commit message to use if committing
293 message: commit message to use if committing
294
294
295 user: name of user to use if committing
295 user: name of user to use if committing
296
296
297 date: date tuple to use if committing'''
297 date: date tuple to use if committing'''
298
298
299 for x in self.status()[:5]:
299 for x in self.status()[:5]:
300 if '.hgtags' in x:
300 if '.hgtags' in x:
301 raise util.Abort(_('working copy of .hgtags is changed '
301 raise util.Abort(_('working copy of .hgtags is changed '
302 '(please commit .hgtags manually)'))
302 '(please commit .hgtags manually)'))
303
303
304 self.tags() # instantiate the cache
304 self.tags() # instantiate the cache
305 self._tag(names, node, message, local, user, date)
305 self._tag(names, node, message, local, user, date)
306
306
307 def tags(self):
307 def tags(self):
308 '''return a mapping of tag to node'''
308 '''return a mapping of tag to node'''
309 if self._tags is None:
309 if self._tags is None:
310 (self._tags, self._tagtypes) = self._findtags()
310 (self._tags, self._tagtypes) = self._findtags()
311
311
312 return self._tags
312 return self._tags
313
313
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        # global tags (.hgtags across heads) first, then .hg/localtags
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                # tags mapped to nullid are deleted tags; drop them
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
346
346
347 def tagtype(self, tagname):
347 def tagtype(self, tagname):
348 '''
348 '''
349 return the type of the given tag. result can be:
349 return the type of the given tag. result can be:
350
350
351 'local' : a local tag
351 'local' : a local tag
352 'global' : a global tag
352 'global' : a global tag
353 None : tag does not exist
353 None : tag does not exist
354 '''
354 '''
355
355
356 self.tags()
356 self.tags()
357
357
358 return self._tagtypes.get(tagname)
358 return self._tagtypes.get(tagname)
359
359
360 def tagslist(self):
360 def tagslist(self):
361 '''return a list of tags ordered by revision'''
361 '''return a list of tags ordered by revision'''
362 l = []
362 l = []
363 for t, n in self.tags().iteritems():
363 for t, n in self.tags().iteritems():
364 try:
364 try:
365 r = self.changelog.rev(n)
365 r = self.changelog.rev(n)
366 except:
366 except:
367 r = -2 # sort to the beginning of the list if unknown
367 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
368 l.append((r, t, n))
369 return [(t, n) for r, t, n in sorted(l)]
369 return [(t, n) for r, t, n in sorted(l)]
370
370
371 def nodetags(self, node):
371 def nodetags(self, node):
372 '''return the tags associated with a node'''
372 '''return the tags associated with a node'''
373 if not self.nodetagscache:
373 if not self.nodetagscache:
374 self.nodetagscache = {}
374 self.nodetagscache = {}
375 for t, n in self.tags().iteritems():
375 for t, n in self.tags().iteritems():
376 self.nodetagscache.setdefault(n, []).append(t)
376 self.nodetagscache.setdefault(n, []).append(t)
377 for tags in self.nodetagscache.itervalues():
377 for tags in self.nodetagscache.itervalues():
378 tags.sort()
378 tags.sort()
379 return self.nodetagscache.get(node, [])
379 return self.nodetagscache.get(node, [])
380
380
    def _branchtags(self, partial, lrev):
        """Bring the branch-head cache 'partial' up to date from lrev to
        the current tip and write the refreshed cache back to disk.

        Returns partial (which is also updated in place).
        """
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            # fold every changeset added after lrev into the cache
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
390
390
    def updatebranchcache(self):
        """Make self._branchcache current with respect to the changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # already current; nothing to do
            # NOTE(review): only this early path returns a value; the
            # normal path falls off the end -- callers should read
            # self._branchcache rather than the return value
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. old tip was stripped):
            # reload the cache from disk
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update starting from the old cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
407
407
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the cache first so the returned dict is current
        self.updatebranchcache()
        return self._branchcache
412
412
413 def branchtags(self):
413 def branchtags(self):
414 '''return a dict where branch names map to the tipmost head of
414 '''return a dict where branch names map to the tipmost head of
415 the branch, open heads come before closed'''
415 the branch, open heads come before closed'''
416 bt = {}
416 bt = {}
417 for bn, heads in self.branchmap().iteritems():
417 for bn, heads in self.branchmap().iteritems():
418 tip = heads[-1]
418 tip = heads[-1]
419 for h in reversed(heads):
419 for h in reversed(heads):
420 if 'close' not in self.changelog.read(h)[5]:
420 if 'close' not in self.changelog.read(h)[5]:
421 tip = h
421 tip = h
422 break
422 break
423 bt[bn] = tip
423 bt[bn] = tip
424 return bt
424 return bt
425
425
426
426
    def _readbranchcache(self):
        """Read .hg/branchheads.cache from disk.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node and revision the
        cache was valid for.  Any problem discards the cache and yields
        ({}, nullid, nullrev) -- the cache is only an optimization.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<node hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is non-fatal: report in debug mode, rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
454
454
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write the branch-head cache to disk.

        Write failures are deliberately ignored (e.g. read-only
        filesystem): the cache is only an optimization and will be
        rebuilt next time.
        """
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            # header line: tip node and revision the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
465
465
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets produced by ctxgen into the branch-head
        cache 'partial' (branch name -> list of head nodes), pruning
        entries that are no longer heads.  partial is updated in place.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # a single head cannot be shadowed; nothing to prune
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    # already pruned by an earlier pass
                    continue
                # limit the reachability walk to revisions at or above
                # the lowest-revision candidate head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                # anything reachable from 'latest' is not a head
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
489
489
    def lookup(self, key):
        """Resolve key to a changelog node.

        key may be a revision number, one of the special names '.',
        'null' or 'tip', a full or abbreviated node id, a tag name, or
        a branch name.  Raises error.RepoLookupError when nothing
        matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            # first parent of the working directory
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # render 20-byte binary ids as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
520
520
521 def lookupbranch(self, key, remote=None):
521 def lookupbranch(self, key, remote=None):
522 repo = remote or self
522 repo = remote or self
523 if key in repo.branchmap():
523 if key in repo.branchmap():
524 return key
524 return key
525
525
526 repo = (remote and remote.local()) and remote or self
526 repo = (remote and remote.local()) and remote or self
527 return repo[key].branch()
527 return repo[key].branch()
528
528
    def local(self):
        """Return True: this is a local (on-disk) repository.

        Remote repository classes return False here.
        """
        return True
531
531
    def join(self, f):
        """Return the path of f relative to the .hg directory (self.path)."""
        return os.path.join(self.path, f)
534
534
    def wjoin(self, f):
        """Return the path of f relative to the working directory root."""
        return os.path.join(self.root, f)
537
537
    def file(self, f):
        """Return the filelog (revision storage) for tracked file f.

        A single leading '/' is stripped so absolute-looking names map to
        the same filelog as repo-relative ones.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
542
542
    def changectx(self, changeid):
        """Return the changectx for changeid (same as self[changeid])."""
        return self[changeid]
545
545
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None selects the working directory context
        return self[changeid].parents()
549
549
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
554
554
    def getcwd(self):
        """Return the current working directory, as seen by the dirstate."""
        return self.dirstate.getcwd()
557
557
    def pathto(self, f, cwd=None):
        """Return f rendered relative to cwd (delegates to the dirstate)."""
        return self.dirstate.pathto(f, cwd)
560
560
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
563
563
564 def _link(self, f):
564 def _link(self, f):
565 return os.path.islink(self.wjoin(f))
565 return os.path.islink(self.wjoin(f))
566
566
    def _loadfilter(self, filter):
        """Load, cache and return the filter patterns for config section
        `filter` ('encode' or 'decode').

        Returns a list of (matcher, filterfn, params) tuples; the list is
        built once per filter name and memoized in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a previously-configured pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter over a shell
                # command; its name is the command prefix
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping through an external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
589
590
590 def _filter(self, filter, filename, data):
591 def _filter(self, filter, filename, data):
591 self._loadfilter(filter)
592 for mf, fn, cmd in self._loadfilter[filter]:
592
593 for mf, fn, cmd in self.filterpats[filter]:
594 if mf(filename):
593 if mf(filename):
595 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
596 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
597 break
596 break
598
597
599 return data
598 return data
600
599
    def adddatafilter(self, name, filter):
        """Register an in-process data filter under name (see _loadfilter)."""
        self._datafilters[name] = filter
603
602
    def wread(self, filename):
        """Read filename from the working directory and apply the
        'encode' filters.

        For symlinks, the link target is read instead of file contents.
        """
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
610
609
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying the
        'decode' filters first.

        flags: 'l' writes a symlink (data is the target), 'x' marks the
        file executable.  The existing file is unlinked first so a new
        inode is created (breaks hardlinks; deliberate best-effort, hence
        the swallowed OSError).
        """
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
623
622
    def wwritedata(self, filename, data):
        """Return data passed through the 'decode' filters, without
        writing anything to disk."""
        return self._filter("decode", filename, data)
626
625
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Returns a transaction object; state needed for rollback (dirstate,
        branch, description) is journaled first, and the journal files are
        renamed to undo.* when the transaction closes.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is already open; join it
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on close, journal.* files become undo.* for a later rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref: the transaction keeps itself alive while running
        self._transref = weakref.ref(tr)
        return tr
656
655
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True if a journal was found and rolled back, False
        otherwise.  Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop caches derived from the now-rewound store
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
671
670
    def rollback(self, dryrun=False):
        """Undo the last committed transaction using the undo.* files.

        With dryrun=True, only the description of what would be undone is
        printed.  Returns 1 when no rollback information is available.
        Holds both the wlock and the store lock.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    # NOTE(review): if undo.desc has fewer than 2 lines,
                    # desc is never assigned and self.ui.status(desc)
                    # below would raise NameError — confirm undo.desc is
                    # always written with >= 2 lines (see transaction()).
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop caches and notify post-destroy hooks/caches
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
710
709
711 def invalidatecaches(self):
710 def invalidatecaches(self):
712 self._tags = None
711 self._tags = None
713 self._tagtypes = None
712 self._tagtypes = None
714 self.nodetagscache = None
713 self.nodetagscache = None
715 self._branchcache = None # in UTF-8
714 self._branchcache = None # in UTF-8
716 self._branchcachetip = None
715 self._branchcachetip = None
717
716
718 def invalidate(self):
717 def invalidate(self):
719 for a in "changelog manifest".split():
718 for a in "changelog manifest".split():
720 if a in self.__dict__:
719 if a in self.__dict__:
721 delattr(self, a)
720 delattr(self, a)
722 self.invalidatecaches()
721 self.invalidatecaches()
723
722
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname and return the lock object.

        If the lock is held and wait is false, re-raise LockHeld;
        otherwise warn and retry with the ui.timeout (default 600s).
        releasefn/acquirefn are callbacks run on release/acquisition;
        desc is used in user-facing messages.
        """
        try:
            # timeout 0: fail immediately if already held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
738
737
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the existing lock if this process already holds it
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # acquiring the store lock invalidates cached store data
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
752
751
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse the existing working-dir lock if already held
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # write the dirstate on release; invalidate it on acquisition
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
767
766
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision being
        created; tr is the active transaction.  Appends fname to
        changelist when the file actually changed, and returns the new
        filelog node (or the unchanged first-parent node).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
842
841
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit.  Holds the wlock for the whole operation.
        (extra uses a mutable default, but it is only read here and
        copied downstream — see commitctx.)
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; report bad files immediately
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and not closing/merging/moving branch:
            # nothing to commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # bare except is deliberate: on ANY failure point the user
                # at the saved message, then re-raise unchanged
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
970
969
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Writes file revisions, the manifest and the changelog entry in a
        single transaction, fires the pretxncommit hook, and returns the
        new changelog node.  With error=True, IOErrors while committing
        files are fatal instead of treating the file as removed.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished: record it as removed instead
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog writes
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1038
1037
1039 def destroyed(self):
1038 def destroyed(self):
1040 '''Inform the repository that nodes have been destroyed.
1039 '''Inform the repository that nodes have been destroyed.
1041 Intended for use by strip and rollback, so there's a common
1040 Intended for use by strip and rollback, so there's a common
1042 place for anything that has to be done after destroying history.'''
1041 place for anything that has to be done after destroying history.'''
1043 # XXX it might be nice if we could take the list of destroyed
1042 # XXX it might be nice if we could take the list of destroyed
1044 # nodes, but I don't see an easy way for rollback() to do that
1043 # nodes, but I don't see an easy way for rollback() to do that
1045
1044
1046 # Ensure the persistent tag cache is updated. Doing it now
1045 # Ensure the persistent tag cache is updated. Doing it now
1047 # means that the tag cache only has to worry about destroyed
1046 # means that the tag cache only has to worry about destroyed
1048 # heads immediately after a strip/rollback. That in turn
1047 # heads immediately after a strip/rollback. That in turn
1049 # guarantees that "cachetip == currenttip" (comparing both rev
1048 # guarantees that "cachetip == currenttip" (comparing both rev
1050 # and node) always means no nodes have been added or destroyed.
1049 # and node) always means no nodes have been added or destroyed.
1051
1050
1052 # XXX this is suboptimal when qrefresh'ing: we strip the current
1051 # XXX this is suboptimal when qrefresh'ing: we strip the current
1053 # head, refresh the tag cache, then immediately add a new head.
1052 # head, refresh the tag cache, then immediately add a new head.
1054 # But I think doing it this way is necessary for the "instant
1053 # But I think doing it this way is necessary for the "instant
1055 # tag cache retrieval" case to work.
1054 # tag cache retrieval" case to work.
1056 self.invalidatecaches()
1055 self.invalidatecaches()
1057
1056
1058 def walk(self, match, node=None):
1057 def walk(self, match, node=None):
1059 '''
1058 '''
1060 walk recursively through the directory tree or a given
1059 walk recursively through the directory tree or a given
1061 changeset, finding all files matched by the match
1060 changeset, finding all files matched by the match
1062 function
1061 function
1063 '''
1062 '''
1064 return self[node].walk(match)
1063 return self[node].walk(match)
1065
1064
1066 def status(self, node1='.', node2=None, match=None,
1065 def status(self, node1='.', node2=None, match=None,
1067 ignored=False, clean=False, unknown=False,
1066 ignored=False, clean=False, unknown=False,
1068 listsubrepos=False):
1067 listsubrepos=False):
1069 """return status of files between two nodes or node and working directory
1068 """return status of files between two nodes or node and working directory
1070
1069
1071 If node1 is None, use the first dirstate parent instead.
1070 If node1 is None, use the first dirstate parent instead.
1072 If node2 is None, compare node1 with working directory.
1071 If node2 is None, compare node1 with working directory.
1073 """
1072 """
1074
1073
1075 def mfmatches(ctx):
1074 def mfmatches(ctx):
1076 mf = ctx.manifest().copy()
1075 mf = ctx.manifest().copy()
1077 for fn in mf.keys():
1076 for fn in mf.keys():
1078 if not match(fn):
1077 if not match(fn):
1079 del mf[fn]
1078 del mf[fn]
1080 return mf
1079 return mf
1081
1080
1082 if isinstance(node1, context.changectx):
1081 if isinstance(node1, context.changectx):
1083 ctx1 = node1
1082 ctx1 = node1
1084 else:
1083 else:
1085 ctx1 = self[node1]
1084 ctx1 = self[node1]
1086 if isinstance(node2, context.changectx):
1085 if isinstance(node2, context.changectx):
1087 ctx2 = node2
1086 ctx2 = node2
1088 else:
1087 else:
1089 ctx2 = self[node2]
1088 ctx2 = self[node2]
1090
1089
1091 working = ctx2.rev() is None
1090 working = ctx2.rev() is None
1092 parentworking = working and ctx1 == self['.']
1091 parentworking = working and ctx1 == self['.']
1093 match = match or matchmod.always(self.root, self.getcwd())
1092 match = match or matchmod.always(self.root, self.getcwd())
1094 listignored, listclean, listunknown = ignored, clean, unknown
1093 listignored, listclean, listunknown = ignored, clean, unknown
1095
1094
1096 # load earliest manifest first for caching reasons
1095 # load earliest manifest first for caching reasons
1097 if not working and ctx2.rev() < ctx1.rev():
1096 if not working and ctx2.rev() < ctx1.rev():
1098 ctx2.manifest()
1097 ctx2.manifest()
1099
1098
1100 if not parentworking:
1099 if not parentworking:
1101 def bad(f, msg):
1100 def bad(f, msg):
1102 if f not in ctx1:
1101 if f not in ctx1:
1103 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1102 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1104 match.bad = bad
1103 match.bad = bad
1105
1104
1106 if working: # we need to scan the working dir
1105 if working: # we need to scan the working dir
1107 subrepos = []
1106 subrepos = []
1108 if '.hgsub' in self.dirstate:
1107 if '.hgsub' in self.dirstate:
1109 subrepos = ctx1.substate.keys()
1108 subrepos = ctx1.substate.keys()
1110 s = self.dirstate.status(match, subrepos, listignored,
1109 s = self.dirstate.status(match, subrepos, listignored,
1111 listclean, listunknown)
1110 listclean, listunknown)
1112 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1111 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1113
1112
1114 # check for any possibly clean files
1113 # check for any possibly clean files
1115 if parentworking and cmp:
1114 if parentworking and cmp:
1116 fixup = []
1115 fixup = []
1117 # do a full compare of any files that might have changed
1116 # do a full compare of any files that might have changed
1118 for f in sorted(cmp):
1117 for f in sorted(cmp):
1119 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1118 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1120 or ctx1[f].cmp(ctx2[f])):
1119 or ctx1[f].cmp(ctx2[f])):
1121 modified.append(f)
1120 modified.append(f)
1122 else:
1121 else:
1123 fixup.append(f)
1122 fixup.append(f)
1124
1123
1125 # update dirstate for files that are actually clean
1124 # update dirstate for files that are actually clean
1126 if fixup:
1125 if fixup:
1127 if listclean:
1126 if listclean:
1128 clean += fixup
1127 clean += fixup
1129
1128
1130 try:
1129 try:
1131 # updating the dirstate is optional
1130 # updating the dirstate is optional
1132 # so we don't wait on the lock
1131 # so we don't wait on the lock
1133 wlock = self.wlock(False)
1132 wlock = self.wlock(False)
1134 try:
1133 try:
1135 for f in fixup:
1134 for f in fixup:
1136 self.dirstate.normal(f)
1135 self.dirstate.normal(f)
1137 finally:
1136 finally:
1138 wlock.release()
1137 wlock.release()
1139 except error.LockError:
1138 except error.LockError:
1140 pass
1139 pass
1141
1140
1142 if not parentworking:
1141 if not parentworking:
1143 mf1 = mfmatches(ctx1)
1142 mf1 = mfmatches(ctx1)
1144 if working:
1143 if working:
1145 # we are comparing working dir against non-parent
1144 # we are comparing working dir against non-parent
1146 # generate a pseudo-manifest for the working dir
1145 # generate a pseudo-manifest for the working dir
1147 mf2 = mfmatches(self['.'])
1146 mf2 = mfmatches(self['.'])
1148 for f in cmp + modified + added:
1147 for f in cmp + modified + added:
1149 mf2[f] = None
1148 mf2[f] = None
1150 mf2.set(f, ctx2.flags(f))
1149 mf2.set(f, ctx2.flags(f))
1151 for f in removed:
1150 for f in removed:
1152 if f in mf2:
1151 if f in mf2:
1153 del mf2[f]
1152 del mf2[f]
1154 else:
1153 else:
1155 # we are comparing two revisions
1154 # we are comparing two revisions
1156 deleted, unknown, ignored = [], [], []
1155 deleted, unknown, ignored = [], [], []
1157 mf2 = mfmatches(ctx2)
1156 mf2 = mfmatches(ctx2)
1158
1157
1159 modified, added, clean = [], [], []
1158 modified, added, clean = [], [], []
1160 for fn in mf2:
1159 for fn in mf2:
1161 if fn in mf1:
1160 if fn in mf1:
1162 if (mf1.flags(fn) != mf2.flags(fn) or
1161 if (mf1.flags(fn) != mf2.flags(fn) or
1163 (mf1[fn] != mf2[fn] and
1162 (mf1[fn] != mf2[fn] and
1164 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1163 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1165 modified.append(fn)
1164 modified.append(fn)
1166 elif listclean:
1165 elif listclean:
1167 clean.append(fn)
1166 clean.append(fn)
1168 del mf1[fn]
1167 del mf1[fn]
1169 else:
1168 else:
1170 added.append(fn)
1169 added.append(fn)
1171 removed = mf1.keys()
1170 removed = mf1.keys()
1172
1171
1173 r = modified, added, removed, deleted, unknown, ignored, clean
1172 r = modified, added, removed, deleted, unknown, ignored, clean
1174
1173
1175 if listsubrepos:
1174 if listsubrepos:
1176 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1175 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1177 if working:
1176 if working:
1178 rev2 = None
1177 rev2 = None
1179 else:
1178 else:
1180 rev2 = ctx2.substate[subpath][1]
1179 rev2 = ctx2.substate[subpath][1]
1181 try:
1180 try:
1182 submatch = matchmod.narrowmatcher(subpath, match)
1181 submatch = matchmod.narrowmatcher(subpath, match)
1183 s = sub.status(rev2, match=submatch, ignored=listignored,
1182 s = sub.status(rev2, match=submatch, ignored=listignored,
1184 clean=listclean, unknown=listunknown,
1183 clean=listclean, unknown=listunknown,
1185 listsubrepos=True)
1184 listsubrepos=True)
1186 for rfiles, sfiles in zip(r, s):
1185 for rfiles, sfiles in zip(r, s):
1187 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1186 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1188 except error.LookupError:
1187 except error.LookupError:
1189 self.ui.status(_("skipping missing subrepository: %s\n")
1188 self.ui.status(_("skipping missing subrepository: %s\n")
1190 % subpath)
1189 % subpath)
1191
1190
1192 [l.sort() for l in r]
1191 [l.sort() for l in r]
1193 return r
1192 return r
1194
1193
1195 def heads(self, start=None):
1194 def heads(self, start=None):
1196 heads = self.changelog.heads(start)
1195 heads = self.changelog.heads(start)
1197 # sort the output in rev descending order
1196 # sort the output in rev descending order
1198 heads = [(-self.changelog.rev(h), h) for h in heads]
1197 heads = [(-self.changelog.rev(h), h) for h in heads]
1199 return [n for (r, n) in sorted(heads)]
1198 return [n for (r, n) in sorted(heads)]
1200
1199
1201 def branchheads(self, branch=None, start=None, closed=False):
1200 def branchheads(self, branch=None, start=None, closed=False):
1202 '''return a (possibly filtered) list of heads for the given branch
1201 '''return a (possibly filtered) list of heads for the given branch
1203
1202
1204 Heads are returned in topological order, from newest to oldest.
1203 Heads are returned in topological order, from newest to oldest.
1205 If branch is None, use the dirstate branch.
1204 If branch is None, use the dirstate branch.
1206 If start is not None, return only heads reachable from start.
1205 If start is not None, return only heads reachable from start.
1207 If closed is True, return heads that are marked as closed as well.
1206 If closed is True, return heads that are marked as closed as well.
1208 '''
1207 '''
1209 if branch is None:
1208 if branch is None:
1210 branch = self[None].branch()
1209 branch = self[None].branch()
1211 branches = self.branchmap()
1210 branches = self.branchmap()
1212 if branch not in branches:
1211 if branch not in branches:
1213 return []
1212 return []
1214 # the cache returns heads ordered lowest to highest
1213 # the cache returns heads ordered lowest to highest
1215 bheads = list(reversed(branches[branch]))
1214 bheads = list(reversed(branches[branch]))
1216 if start is not None:
1215 if start is not None:
1217 # filter out the heads that cannot be reached from startrev
1216 # filter out the heads that cannot be reached from startrev
1218 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1217 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1219 bheads = [h for h in bheads if h in fbheads]
1218 bheads = [h for h in bheads if h in fbheads]
1220 if not closed:
1219 if not closed:
1221 bheads = [h for h in bheads if
1220 bheads = [h for h in bheads if
1222 ('close' not in self.changelog.read(h)[5])]
1221 ('close' not in self.changelog.read(h)[5])]
1223 return bheads
1222 return bheads
1224
1223
1225 def branches(self, nodes):
1224 def branches(self, nodes):
1226 if not nodes:
1225 if not nodes:
1227 nodes = [self.changelog.tip()]
1226 nodes = [self.changelog.tip()]
1228 b = []
1227 b = []
1229 for n in nodes:
1228 for n in nodes:
1230 t = n
1229 t = n
1231 while 1:
1230 while 1:
1232 p = self.changelog.parents(n)
1231 p = self.changelog.parents(n)
1233 if p[1] != nullid or p[0] == nullid:
1232 if p[1] != nullid or p[0] == nullid:
1234 b.append((t, n, p[0], p[1]))
1233 b.append((t, n, p[0], p[1]))
1235 break
1234 break
1236 n = p[0]
1235 n = p[0]
1237 return b
1236 return b
1238
1237
1239 def between(self, pairs):
1238 def between(self, pairs):
1240 r = []
1239 r = []
1241
1240
1242 for top, bottom in pairs:
1241 for top, bottom in pairs:
1243 n, l, i = top, [], 0
1242 n, l, i = top, [], 0
1244 f = 1
1243 f = 1
1245
1244
1246 while n != bottom and n != nullid:
1245 while n != bottom and n != nullid:
1247 p = self.changelog.parents(n)[0]
1246 p = self.changelog.parents(n)[0]
1248 if i == f:
1247 if i == f:
1249 l.append(n)
1248 l.append(n)
1250 f = f * 2
1249 f = f * 2
1251 n = p
1250 n = p
1252 i += 1
1251 i += 1
1253
1252
1254 r.append(l)
1253 r.append(l)
1255
1254
1256 return r
1255 return r
1257
1256
1258 def pull(self, remote, heads=None, force=False):
1257 def pull(self, remote, heads=None, force=False):
1259 lock = self.lock()
1258 lock = self.lock()
1260 try:
1259 try:
1261 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1260 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1262 force=force)
1261 force=force)
1263 common, fetch, rheads = tmp
1262 common, fetch, rheads = tmp
1264 if not fetch:
1263 if not fetch:
1265 self.ui.status(_("no changes found\n"))
1264 self.ui.status(_("no changes found\n"))
1266 return 0
1265 return 0
1267
1266
1268 if fetch == [nullid]:
1267 if fetch == [nullid]:
1269 self.ui.status(_("requesting all changes\n"))
1268 self.ui.status(_("requesting all changes\n"))
1270 elif heads is None and remote.capable('changegroupsubset'):
1269 elif heads is None and remote.capable('changegroupsubset'):
1271 # issue1320, avoid a race if remote changed after discovery
1270 # issue1320, avoid a race if remote changed after discovery
1272 heads = rheads
1271 heads = rheads
1273
1272
1274 if heads is None:
1273 if heads is None:
1275 cg = remote.changegroup(fetch, 'pull')
1274 cg = remote.changegroup(fetch, 'pull')
1276 else:
1275 else:
1277 if not remote.capable('changegroupsubset'):
1276 if not remote.capable('changegroupsubset'):
1278 raise util.Abort(_("partial pull cannot be done because "
1277 raise util.Abort(_("partial pull cannot be done because "
1279 "other repository doesn't support "
1278 "other repository doesn't support "
1280 "changegroupsubset."))
1279 "changegroupsubset."))
1281 cg = remote.changegroupsubset(fetch, heads, 'pull')
1280 cg = remote.changegroupsubset(fetch, heads, 'pull')
1282 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1281 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1283 finally:
1282 finally:
1284 lock.release()
1283 lock.release()
1285
1284
1286 def push(self, remote, force=False, revs=None, newbranch=False):
1285 def push(self, remote, force=False, revs=None, newbranch=False):
1287 '''Push outgoing changesets (limited by revs) from the current
1286 '''Push outgoing changesets (limited by revs) from the current
1288 repository to remote. Return an integer:
1287 repository to remote. Return an integer:
1289 - 0 means HTTP error *or* nothing to push
1288 - 0 means HTTP error *or* nothing to push
1290 - 1 means we pushed and remote head count is unchanged *or*
1289 - 1 means we pushed and remote head count is unchanged *or*
1291 we have outgoing changesets but refused to push
1290 we have outgoing changesets but refused to push
1292 - other values as described by addchangegroup()
1291 - other values as described by addchangegroup()
1293 '''
1292 '''
1294 # there are two ways to push to remote repo:
1293 # there are two ways to push to remote repo:
1295 #
1294 #
1296 # addchangegroup assumes local user can lock remote
1295 # addchangegroup assumes local user can lock remote
1297 # repo (local filesystem, old ssh servers).
1296 # repo (local filesystem, old ssh servers).
1298 #
1297 #
1299 # unbundle assumes local user cannot lock remote repo (new ssh
1298 # unbundle assumes local user cannot lock remote repo (new ssh
1300 # servers, http servers).
1299 # servers, http servers).
1301
1300
1302 lock = None
1301 lock = None
1303 unbundle = remote.capable('unbundle')
1302 unbundle = remote.capable('unbundle')
1304 if not unbundle:
1303 if not unbundle:
1305 lock = remote.lock()
1304 lock = remote.lock()
1306 try:
1305 try:
1307 ret = discovery.prepush(self, remote, force, revs, newbranch)
1306 ret = discovery.prepush(self, remote, force, revs, newbranch)
1308 if ret[0] is None:
1307 if ret[0] is None:
1309 # and here we return 0 for "nothing to push" or 1 for
1308 # and here we return 0 for "nothing to push" or 1 for
1310 # "something to push but I refuse"
1309 # "something to push but I refuse"
1311 return ret[1]
1310 return ret[1]
1312
1311
1313 cg, remote_heads = ret
1312 cg, remote_heads = ret
1314 if unbundle:
1313 if unbundle:
1315 # local repo finds heads on server, finds out what revs it must
1314 # local repo finds heads on server, finds out what revs it must
1316 # push. once revs transferred, if server finds it has
1315 # push. once revs transferred, if server finds it has
1317 # different heads (someone else won commit/push race), server
1316 # different heads (someone else won commit/push race), server
1318 # aborts.
1317 # aborts.
1319 if force:
1318 if force:
1320 remote_heads = ['force']
1319 remote_heads = ['force']
1321 # ssh: return remote's addchangegroup()
1320 # ssh: return remote's addchangegroup()
1322 # http: return remote's addchangegroup() or 0 for error
1321 # http: return remote's addchangegroup() or 0 for error
1323 return remote.unbundle(cg, remote_heads, 'push')
1322 return remote.unbundle(cg, remote_heads, 'push')
1324 else:
1323 else:
1325 # we return an integer indicating remote head count change
1324 # we return an integer indicating remote head count change
1326 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1325 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1327 finally:
1326 finally:
1328 if lock is not None:
1327 if lock is not None:
1329 lock.release()
1328 lock.release()
1330
1329
1331 def changegroupinfo(self, nodes, source):
1330 def changegroupinfo(self, nodes, source):
1332 if self.ui.verbose or source == 'bundle':
1331 if self.ui.verbose or source == 'bundle':
1333 self.ui.status(_("%d changesets found\n") % len(nodes))
1332 self.ui.status(_("%d changesets found\n") % len(nodes))
1334 if self.ui.debugflag:
1333 if self.ui.debugflag:
1335 self.ui.debug("list of changesets:\n")
1334 self.ui.debug("list of changesets:\n")
1336 for node in nodes:
1335 for node in nodes:
1337 self.ui.debug("%s\n" % hex(node))
1336 self.ui.debug("%s\n" % hex(node))
1338
1337
1339 def changegroupsubset(self, bases, heads, source, extranodes=None):
1338 def changegroupsubset(self, bases, heads, source, extranodes=None):
1340 """Compute a changegroup consisting of all the nodes that are
1339 """Compute a changegroup consisting of all the nodes that are
1341 descendents of any of the bases and ancestors of any of the heads.
1340 descendents of any of the bases and ancestors of any of the heads.
1342 Return a chunkbuffer object whose read() method will return
1341 Return a chunkbuffer object whose read() method will return
1343 successive changegroup chunks.
1342 successive changegroup chunks.
1344
1343
1345 It is fairly complex as determining which filenodes and which
1344 It is fairly complex as determining which filenodes and which
1346 manifest nodes need to be included for the changeset to be complete
1345 manifest nodes need to be included for the changeset to be complete
1347 is non-trivial.
1346 is non-trivial.
1348
1347
1349 Another wrinkle is doing the reverse, figuring out which changeset in
1348 Another wrinkle is doing the reverse, figuring out which changeset in
1350 the changegroup a particular filenode or manifestnode belongs to.
1349 the changegroup a particular filenode or manifestnode belongs to.
1351
1350
1352 The caller can specify some nodes that must be included in the
1351 The caller can specify some nodes that must be included in the
1353 changegroup using the extranodes argument. It should be a dict
1352 changegroup using the extranodes argument. It should be a dict
1354 where the keys are the filenames (or 1 for the manifest), and the
1353 where the keys are the filenames (or 1 for the manifest), and the
1355 values are lists of (node, linknode) tuples, where node is a wanted
1354 values are lists of (node, linknode) tuples, where node is a wanted
1356 node and linknode is the changelog node that should be transmitted as
1355 node and linknode is the changelog node that should be transmitted as
1357 the linkrev.
1356 the linkrev.
1358 """
1357 """
1359
1358
1360 # Set up some initial variables
1359 # Set up some initial variables
1361 # Make it easy to refer to self.changelog
1360 # Make it easy to refer to self.changelog
1362 cl = self.changelog
1361 cl = self.changelog
1363 # Compute the list of changesets in this changegroup.
1362 # Compute the list of changesets in this changegroup.
1364 # Some bases may turn out to be superfluous, and some heads may be
1363 # Some bases may turn out to be superfluous, and some heads may be
1365 # too. nodesbetween will return the minimal set of bases and heads
1364 # too. nodesbetween will return the minimal set of bases and heads
1366 # necessary to re-create the changegroup.
1365 # necessary to re-create the changegroup.
1367 if not bases:
1366 if not bases:
1368 bases = [nullid]
1367 bases = [nullid]
1369 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1368 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1370
1369
1371 if extranodes is None:
1370 if extranodes is None:
1372 # can we go through the fast path ?
1371 # can we go through the fast path ?
1373 heads.sort()
1372 heads.sort()
1374 allheads = self.heads()
1373 allheads = self.heads()
1375 allheads.sort()
1374 allheads.sort()
1376 if heads == allheads:
1375 if heads == allheads:
1377 return self._changegroup(msng_cl_lst, source)
1376 return self._changegroup(msng_cl_lst, source)
1378
1377
1379 # slow path
1378 # slow path
1380 self.hook('preoutgoing', throw=True, source=source)
1379 self.hook('preoutgoing', throw=True, source=source)
1381
1380
1382 self.changegroupinfo(msng_cl_lst, source)
1381 self.changegroupinfo(msng_cl_lst, source)
1383
1382
1384 # We assume that all ancestors of bases are known
1383 # We assume that all ancestors of bases are known
1385 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1384 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1386
1385
1387 # Make it easy to refer to self.manifest
1386 # Make it easy to refer to self.manifest
1388 mnfst = self.manifest
1387 mnfst = self.manifest
1389 # We don't know which manifests are missing yet
1388 # We don't know which manifests are missing yet
1390 msng_mnfst_set = {}
1389 msng_mnfst_set = {}
1391 # Nor do we know which filenodes are missing.
1390 # Nor do we know which filenodes are missing.
1392 msng_filenode_set = {}
1391 msng_filenode_set = {}
1393
1392
1394 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1393 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1395 junk = None
1394 junk = None
1396
1395
1397 # A changeset always belongs to itself, so the changenode lookup
1396 # A changeset always belongs to itself, so the changenode lookup
1398 # function for a changenode is identity.
1397 # function for a changenode is identity.
1399 def identity(x):
1398 def identity(x):
1400 return x
1399 return x
1401
1400
1402 # A function generating function that sets up the initial environment
1401 # A function generating function that sets up the initial environment
1403 # the inner function.
1402 # the inner function.
1404 def filenode_collector(changedfiles):
1403 def filenode_collector(changedfiles):
1405 # This gathers information from each manifestnode included in the
1404 # This gathers information from each manifestnode included in the
1406 # changegroup about which filenodes the manifest node references
1405 # changegroup about which filenodes the manifest node references
1407 # so we can include those in the changegroup too.
1406 # so we can include those in the changegroup too.
1408 #
1407 #
1409 # It also remembers which changenode each filenode belongs to. It
1408 # It also remembers which changenode each filenode belongs to. It
1410 # does this by assuming the a filenode belongs to the changenode
1409 # does this by assuming the a filenode belongs to the changenode
1411 # the first manifest that references it belongs to.
1410 # the first manifest that references it belongs to.
1412 def collect_msng_filenodes(mnfstnode):
1411 def collect_msng_filenodes(mnfstnode):
1413 r = mnfst.rev(mnfstnode)
1412 r = mnfst.rev(mnfstnode)
1414 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1413 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1415 # If the previous rev is one of the parents,
1414 # If the previous rev is one of the parents,
1416 # we only need to see a diff.
1415 # we only need to see a diff.
1417 deltamf = mnfst.readdelta(mnfstnode)
1416 deltamf = mnfst.readdelta(mnfstnode)
1418 # For each line in the delta
1417 # For each line in the delta
1419 for f, fnode in deltamf.iteritems():
1418 for f, fnode in deltamf.iteritems():
1420 # And if the file is in the list of files we care
1419 # And if the file is in the list of files we care
1421 # about.
1420 # about.
1422 if f in changedfiles:
1421 if f in changedfiles:
1423 # Get the changenode this manifest belongs to
1422 # Get the changenode this manifest belongs to
1424 clnode = msng_mnfst_set[mnfstnode]
1423 clnode = msng_mnfst_set[mnfstnode]
1425 # Create the set of filenodes for the file if
1424 # Create the set of filenodes for the file if
1426 # there isn't one already.
1425 # there isn't one already.
1427 ndset = msng_filenode_set.setdefault(f, {})
1426 ndset = msng_filenode_set.setdefault(f, {})
1428 # And set the filenode's changelog node to the
1427 # And set the filenode's changelog node to the
1429 # manifest's if it hasn't been set already.
1428 # manifest's if it hasn't been set already.
1430 ndset.setdefault(fnode, clnode)
1429 ndset.setdefault(fnode, clnode)
1431 else:
1430 else:
1432 # Otherwise we need a full manifest.
1431 # Otherwise we need a full manifest.
1433 m = mnfst.read(mnfstnode)
1432 m = mnfst.read(mnfstnode)
1434 # For every file in we care about.
1433 # For every file in we care about.
1435 for f in changedfiles:
1434 for f in changedfiles:
1436 fnode = m.get(f, None)
1435 fnode = m.get(f, None)
1437 # If it's in the manifest
1436 # If it's in the manifest
1438 if fnode is not None:
1437 if fnode is not None:
1439 # See comments above.
1438 # See comments above.
1440 clnode = msng_mnfst_set[mnfstnode]
1439 clnode = msng_mnfst_set[mnfstnode]
1441 ndset = msng_filenode_set.setdefault(f, {})
1440 ndset = msng_filenode_set.setdefault(f, {})
1442 ndset.setdefault(fnode, clnode)
1441 ndset.setdefault(fnode, clnode)
1443 return collect_msng_filenodes
1442 return collect_msng_filenodes
1444
1443
1445 # If we determine that a particular file or manifest node must be a
1444 # If we determine that a particular file or manifest node must be a
1446 # node that the recipient of the changegroup will already have, we can
1445 # node that the recipient of the changegroup will already have, we can
1447 # also assume the recipient will have all the parents. This function
1446 # also assume the recipient will have all the parents. This function
1448 # prunes them from the set of missing nodes.
1447 # prunes them from the set of missing nodes.
1449 def prune(revlog, missingnodes):
1448 def prune(revlog, missingnodes):
1450 hasset = set()
1449 hasset = set()
1451 # If a 'missing' filenode thinks it belongs to a changenode we
1450 # If a 'missing' filenode thinks it belongs to a changenode we
1452 # assume the recipient must have, then the recipient must have
1451 # assume the recipient must have, then the recipient must have
1453 # that filenode.
1452 # that filenode.
1454 for n in missingnodes:
1453 for n in missingnodes:
1455 clrev = revlog.linkrev(revlog.rev(n))
1454 clrev = revlog.linkrev(revlog.rev(n))
1456 if clrev in commonrevs:
1455 if clrev in commonrevs:
1457 hasset.add(n)
1456 hasset.add(n)
1458 for n in hasset:
1457 for n in hasset:
1459 missingnodes.pop(n, None)
1458 missingnodes.pop(n, None)
1460 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1459 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1461 missingnodes.pop(revlog.node(r), None)
1460 missingnodes.pop(revlog.node(r), None)
1462
1461
1463 # Add the nodes that were explicitly requested.
1462 # Add the nodes that were explicitly requested.
1464 def add_extra_nodes(name, nodes):
1463 def add_extra_nodes(name, nodes):
1465 if not extranodes or name not in extranodes:
1464 if not extranodes or name not in extranodes:
1466 return
1465 return
1467
1466
1468 for node, linknode in extranodes[name]:
1467 for node, linknode in extranodes[name]:
1469 if node not in nodes:
1468 if node not in nodes:
1470 nodes[node] = linknode
1469 nodes[node] = linknode
1471
1470
1472 # Now that we have all theses utility functions to help out and
1471 # Now that we have all theses utility functions to help out and
1473 # logically divide up the task, generate the group.
1472 # logically divide up the task, generate the group.
1474 def gengroup():
1473 def gengroup():
1475 # The set of changed files starts empty.
1474 # The set of changed files starts empty.
1476 changedfiles = set()
1475 changedfiles = set()
1477 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1476 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1478
1477
1479 # Create a changenode group generator that will call our functions
1478 # Create a changenode group generator that will call our functions
1480 # back to lookup the owning changenode and collect information.
1479 # back to lookup the owning changenode and collect information.
1481 group = cl.group(msng_cl_lst, identity, collect)
1480 group = cl.group(msng_cl_lst, identity, collect)
1482 for cnt, chnk in enumerate(group):
1481 for cnt, chnk in enumerate(group):
1483 yield chnk
1482 yield chnk
1484 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1483 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1485 self.ui.progress(_('bundling changes'), None)
1484 self.ui.progress(_('bundling changes'), None)
1486
1485
1487 prune(mnfst, msng_mnfst_set)
1486 prune(mnfst, msng_mnfst_set)
1488 add_extra_nodes(1, msng_mnfst_set)
1487 add_extra_nodes(1, msng_mnfst_set)
1489 msng_mnfst_lst = msng_mnfst_set.keys()
1488 msng_mnfst_lst = msng_mnfst_set.keys()
1490 # Sort the manifestnodes by revision number.
1489 # Sort the manifestnodes by revision number.
1491 msng_mnfst_lst.sort(key=mnfst.rev)
1490 msng_mnfst_lst.sort(key=mnfst.rev)
1492 # Create a generator for the manifestnodes that calls our lookup
1491 # Create a generator for the manifestnodes that calls our lookup
1493 # and data collection functions back.
1492 # and data collection functions back.
1494 group = mnfst.group(msng_mnfst_lst,
1493 group = mnfst.group(msng_mnfst_lst,
1495 lambda mnode: msng_mnfst_set[mnode],
1494 lambda mnode: msng_mnfst_set[mnode],
1496 filenode_collector(changedfiles))
1495 filenode_collector(changedfiles))
1497 for cnt, chnk in enumerate(group):
1496 for cnt, chnk in enumerate(group):
1498 yield chnk
1497 yield chnk
1499 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1498 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1500 self.ui.progress(_('bundling manifests'), None)
1499 self.ui.progress(_('bundling manifests'), None)
1501
1500
1502 # These are no longer needed, dereference and toss the memory for
1501 # These are no longer needed, dereference and toss the memory for
1503 # them.
1502 # them.
1504 msng_mnfst_lst = None
1503 msng_mnfst_lst = None
1505 msng_mnfst_set.clear()
1504 msng_mnfst_set.clear()
1506
1505
1507 if extranodes:
1506 if extranodes:
1508 for fname in extranodes:
1507 for fname in extranodes:
1509 if isinstance(fname, int):
1508 if isinstance(fname, int):
1510 continue
1509 continue
1511 msng_filenode_set.setdefault(fname, {})
1510 msng_filenode_set.setdefault(fname, {})
1512 changedfiles.add(fname)
1511 changedfiles.add(fname)
1513 # Go through all our files in order sorted by name.
1512 # Go through all our files in order sorted by name.
1514 cnt = 0
1513 cnt = 0
1515 for fname in sorted(changedfiles):
1514 for fname in sorted(changedfiles):
1516 filerevlog = self.file(fname)
1515 filerevlog = self.file(fname)
1517 if not len(filerevlog):
1516 if not len(filerevlog):
1518 raise util.Abort(_("empty or missing revlog for %s") % fname)
1517 raise util.Abort(_("empty or missing revlog for %s") % fname)
1519 # Toss out the filenodes that the recipient isn't really
1518 # Toss out the filenodes that the recipient isn't really
1520 # missing.
1519 # missing.
1521 missingfnodes = msng_filenode_set.pop(fname, {})
1520 missingfnodes = msng_filenode_set.pop(fname, {})
1522 prune(filerevlog, missingfnodes)
1521 prune(filerevlog, missingfnodes)
1523 add_extra_nodes(fname, missingfnodes)
1522 add_extra_nodes(fname, missingfnodes)
1524 # If any filenodes are left, generate the group for them,
1523 # If any filenodes are left, generate the group for them,
1525 # otherwise don't bother.
1524 # otherwise don't bother.
1526 if missingfnodes:
1525 if missingfnodes:
1527 yield changegroup.chunkheader(len(fname))
1526 yield changegroup.chunkheader(len(fname))
1528 yield fname
1527 yield fname
1529 # Sort the filenodes by their revision # (topological order)
1528 # Sort the filenodes by their revision # (topological order)
1530 nodeiter = list(missingfnodes)
1529 nodeiter = list(missingfnodes)
1531 nodeiter.sort(key=filerevlog.rev)
1530 nodeiter.sort(key=filerevlog.rev)
1532 # Create a group generator and only pass in a changenode
1531 # Create a group generator and only pass in a changenode
1533 # lookup function as we need to collect no information
1532 # lookup function as we need to collect no information
1534 # from filenodes.
1533 # from filenodes.
1535 group = filerevlog.group(nodeiter,
1534 group = filerevlog.group(nodeiter,
1536 lambda fnode: missingfnodes[fnode])
1535 lambda fnode: missingfnodes[fnode])
1537 for chnk in group:
1536 for chnk in group:
1538 self.ui.progress(
1537 self.ui.progress(
1539 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1538 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1540 cnt += 1
1539 cnt += 1
1541 yield chnk
1540 yield chnk
1542 # Signal that no more groups are left.
1541 # Signal that no more groups are left.
1543 yield changegroup.closechunk()
1542 yield changegroup.closechunk()
1544 self.ui.progress(_('bundling files'), None)
1543 self.ui.progress(_('bundling files'), None)
1545
1544
1546 if msng_cl_lst:
1545 if msng_cl_lst:
1547 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1546 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1548
1547
1549 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1548 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1550
1549
1551 def changegroup(self, basenodes, source):
1550 def changegroup(self, basenodes, source):
1552 # to avoid a race we use changegroupsubset() (issue1320)
1551 # to avoid a race we use changegroupsubset() (issue1320)
1553 return self.changegroupsubset(basenodes, self.heads(), source)
1552 return self.changegroupsubset(basenodes, self.heads(), source)
1554
1553
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        # give extensions/hooks a chance to veto the outgoing transfer
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # revision numbers of the outgoing changesets; manifest and file
        # revisions are shipped only when their linkrev falls in this set
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # changelog nodes are their own lookup key
        def identity(x):
            return x

        # yield the nodes of 'log' whose linkrev points at an outgoing cset
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a function mapping a node of 'revlog' back to the changelog
        # node that introduced it (needed by the group() delta generator)
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            # NOTE(review): collector presumably fills mmfs/changedfiles as a
            # side effect of the changelog walk below — see changegroup.py
            collect = changegroup.collector(cl, mmfs, changedfiles)

            # first section: the changelog chunks themselves
            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            # second section: the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            # third section: one group per changed file, in sorted order
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # each file group is introduced by a chunk carrying the
                    # file's name, followed by its delta chunks
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            # an empty chunk terminates the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        # wrap the lazy generator so callers get a file-like read() interface
        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1628
1627
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # incoming changesets are appended, so the rev the next one will
        # receive is always len(cl)
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        # map a changelog node to its (already assigned) revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # progress callback handed to the unbundler; invoked once per
            # chunk, so it just bumps and reports a running count
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # rebind efiles from a set to its size: only the count of
            # distinct touched files is needed (for progress totals)
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            # file groups arrive as: filename chunk, then that file's delta
            # chunks; an empty filename chunk terminates the sequence
            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                # tick off the filenodes the server told us to expect
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles never arrived in the bundle;
            # accept it only if the revlog already has it locally
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # let pretxnchangegroup hooks see the pending changelog
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        # post-transaction work: only after the data is safely committed
        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1790
1789
1791
1790
1792 def stream_in(self, remote, requirements):
1791 def stream_in(self, remote, requirements):
1793 fp = remote.stream_out()
1792 fp = remote.stream_out()
1794 l = fp.readline()
1793 l = fp.readline()
1795 try:
1794 try:
1796 resp = int(l)
1795 resp = int(l)
1797 except ValueError:
1796 except ValueError:
1798 raise error.ResponseError(
1797 raise error.ResponseError(
1799 _('Unexpected response from remote server:'), l)
1798 _('Unexpected response from remote server:'), l)
1800 if resp == 1:
1799 if resp == 1:
1801 raise util.Abort(_('operation forbidden by server'))
1800 raise util.Abort(_('operation forbidden by server'))
1802 elif resp == 2:
1801 elif resp == 2:
1803 raise util.Abort(_('locking the remote repository failed'))
1802 raise util.Abort(_('locking the remote repository failed'))
1804 elif resp != 0:
1803 elif resp != 0:
1805 raise util.Abort(_('the server sent an unknown error code'))
1804 raise util.Abort(_('the server sent an unknown error code'))
1806 self.ui.status(_('streaming all changes\n'))
1805 self.ui.status(_('streaming all changes\n'))
1807 l = fp.readline()
1806 l = fp.readline()
1808 try:
1807 try:
1809 total_files, total_bytes = map(int, l.split(' ', 1))
1808 total_files, total_bytes = map(int, l.split(' ', 1))
1810 except (ValueError, TypeError):
1809 except (ValueError, TypeError):
1811 raise error.ResponseError(
1810 raise error.ResponseError(
1812 _('Unexpected response from remote server:'), l)
1811 _('Unexpected response from remote server:'), l)
1813 self.ui.status(_('%d files to transfer, %s of data\n') %
1812 self.ui.status(_('%d files to transfer, %s of data\n') %
1814 (total_files, util.bytecount(total_bytes)))
1813 (total_files, util.bytecount(total_bytes)))
1815 start = time.time()
1814 start = time.time()
1816 for i in xrange(total_files):
1815 for i in xrange(total_files):
1817 # XXX doesn't support '\n' or '\r' in filenames
1816 # XXX doesn't support '\n' or '\r' in filenames
1818 l = fp.readline()
1817 l = fp.readline()
1819 try:
1818 try:
1820 name, size = l.split('\0', 1)
1819 name, size = l.split('\0', 1)
1821 size = int(size)
1820 size = int(size)
1822 except (ValueError, TypeError):
1821 except (ValueError, TypeError):
1823 raise error.ResponseError(
1822 raise error.ResponseError(
1824 _('Unexpected response from remote server:'), l)
1823 _('Unexpected response from remote server:'), l)
1825 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1824 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1826 # for backwards compat, name was partially encoded
1825 # for backwards compat, name was partially encoded
1827 ofp = self.sopener(store.decodedir(name), 'w')
1826 ofp = self.sopener(store.decodedir(name), 'w')
1828 for chunk in util.filechunkiter(fp, limit=size):
1827 for chunk in util.filechunkiter(fp, limit=size):
1829 ofp.write(chunk)
1828 ofp.write(chunk)
1830 ofp.close()
1829 ofp.close()
1831 elapsed = time.time() - start
1830 elapsed = time.time() - start
1832 if elapsed <= 0:
1831 if elapsed <= 0:
1833 elapsed = 0.001
1832 elapsed = 0.001
1834 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1833 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1835 (util.bytecount(total_bytes), elapsed,
1834 (util.bytecount(total_bytes), elapsed,
1836 util.bytecount(total_bytes / elapsed)))
1835 util.bytecount(total_bytes / elapsed)))
1837
1836
1838 # new requirements = old non-format requirements + new format-related
1837 # new requirements = old non-format requirements + new format-related
1839 # requirements from the streamed-in repository
1838 # requirements from the streamed-in repository
1840 requirements.update(set(self.requirements) - self.supportedformats)
1839 requirements.update(set(self.requirements) - self.supportedformats)
1841 self._applyrequirements(requirements)
1840 self._applyrequirements(requirements)
1842 self._writerequirements()
1841 self._writerequirements()
1843
1842
1844 self.invalidate()
1843 self.invalidate()
1845 return len(self.heads()) + 1
1844 return len(self.heads()) + 1
1846
1845
1847 def clone(self, remote, heads=[], stream=False):
1846 def clone(self, remote, heads=[], stream=False):
1848 '''clone remote repository.
1847 '''clone remote repository.
1849
1848
1850 keyword arguments:
1849 keyword arguments:
1851 heads: list of revs to clone (forces use of pull)
1850 heads: list of revs to clone (forces use of pull)
1852 stream: use streaming clone if possible'''
1851 stream: use streaming clone if possible'''
1853
1852
1854 # now, all clients that can request uncompressed clones can
1853 # now, all clients that can request uncompressed clones can
1855 # read repo formats supported by all servers that can serve
1854 # read repo formats supported by all servers that can serve
1856 # them.
1855 # them.
1857
1856
1858 # if revlog format changes, client will have to check version
1857 # if revlog format changes, client will have to check version
1859 # and format flags on "stream" capability, and use
1858 # and format flags on "stream" capability, and use
1860 # uncompressed only if compatible.
1859 # uncompressed only if compatible.
1861
1860
1862 if stream and not heads:
1861 if stream and not heads:
1863 # 'stream' means remote revlog format is revlogv1 only
1862 # 'stream' means remote revlog format is revlogv1 only
1864 if remote.capable('stream'):
1863 if remote.capable('stream'):
1865 return self.stream_in(remote, set(('revlogv1',)))
1864 return self.stream_in(remote, set(('revlogv1',)))
1866 # otherwise, 'streamreqs' contains the remote revlog format
1865 # otherwise, 'streamreqs' contains the remote revlog format
1867 streamreqs = remote.capable('streamreqs')
1866 streamreqs = remote.capable('streamreqs')
1868 if streamreqs:
1867 if streamreqs:
1869 streamreqs = set(streamreqs.split(','))
1868 streamreqs = set(streamreqs.split(','))
1870 # if we support it, stream in and adjust our requirements
1869 # if we support it, stream in and adjust our requirements
1871 if not streamreqs - self.supportedformats:
1870 if not streamreqs - self.supportedformats:
1872 return self.stream_in(remote, streamreqs)
1871 return self.stream_in(remote, streamreqs)
1873 return self.pull(remote, heads)
1872 return self.pull(remote, heads)
1874
1873
1875 def pushkey(self, namespace, key, old, new):
1874 def pushkey(self, namespace, key, old, new):
1876 return pushkey.push(self, namespace, key, old, new)
1875 return pushkey.push(self, namespace, key, old, new)
1877
1876
1878 def listkeys(self, namespace):
1877 def listkeys(self, namespace):
1879 return pushkey.list(self, namespace)
1878 return pushkey.list(self, namespace)
1880
1879
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in *files*.

    The pairs are snapshotted as tuples up front, so later mutation of the
    caller's sequence items does not affect the pending renames.
    """
    pending = [tuple(pair) for pair in files]
    def renamer():
        for source, destination in pending:
            util.rename(source, destination)
    return renamer
1888
1887
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, stripping any leading
    'file' scheme from the location first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1891
1890
def islocal(path):
    """Report whether *path* names a local repository.

    Every repository handled by this module is by definition local, so
    this always answers True regardless of *path*.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now