localrepo: move string formatting out of gettext call
Martin Geisler, r13037:9beac11b (branch: default)
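The change is a one-liner in the dirstate validate() helper: the % interpolation moves outside the _() call. gettext must be handed the literal msgid that appears in the translation catalogs; when the node hash is interpolated first, every call asks the catalog to translate a different, never-catalogued string, so the message is never translated (and translators never see the %s placeholder). A minimal, self-contained sketch of the two patterns, using the stdlib gettext module as a stand-in for Mercurial's i18n._ (the hash value is illustrative):

    import gettext
    _ = gettext.NullTranslations().gettext  # stand-in for mercurial.i18n._

    node_hex = "9beac11b"  # illustrative short changeset hash

    # Broken: formatting runs first, so _() is asked to translate
    # "warning: ignoring unknown working parent 9beac11b!\n" -- a msgid
    # that can never appear in a .po catalog, so the lookup always misses.
    broken = _("warning: ignoring unknown working parent %s!\n" % node_hex)

    # Fixed: _() receives the literal msgid from the source file; the
    # hash is substituted into the translated string afterwards.
    fixed = _("warning: ignoring unknown working parent %s!\n") % node_hex

    assert broken == fixed  # same output in English; only the fixed form is translatable

In the committed code the message is split across two source lines, so the whole difference is where the closing parenthesis of _() sits, as the hunk below shows.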
@@ -1,1916 +1,1916 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository.  _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'.  (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.)  They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False


    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
-                                  " working parent %s!\n" % short(node)))
+                                  " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
666 (self.join("journal.dirstate"), self.join("undo.dirstate")),
666 (self.join("journal.dirstate"), self.join("undo.dirstate")),
667 (self.join("journal.branch"), self.join("undo.branch")),
667 (self.join("journal.branch"), self.join("undo.branch")),
668 (self.join("journal.desc"), self.join("undo.desc"))]
668 (self.join("journal.desc"), self.join("undo.desc"))]
669 tr = transaction.transaction(self.ui.warn, self.sopener,
669 tr = transaction.transaction(self.ui.warn, self.sopener,
670 self.sjoin("journal"),
670 self.sjoin("journal"),
671 aftertrans(renames),
671 aftertrans(renames),
672 self.store.createmode)
672 self.store.createmode)
673 self._transref = weakref.ref(tr)
673 self._transref = weakref.ref(tr)
674 return tr
674 return tr
675
675
676 def recover(self):
676 def recover(self):
677 lock = self.lock()
677 lock = self.lock()
678 try:
678 try:
679 if os.path.exists(self.sjoin("journal")):
679 if os.path.exists(self.sjoin("journal")):
680 self.ui.status(_("rolling back interrupted transaction\n"))
680 self.ui.status(_("rolling back interrupted transaction\n"))
681 transaction.rollback(self.sopener, self.sjoin("journal"),
681 transaction.rollback(self.sopener, self.sjoin("journal"),
682 self.ui.warn)
682 self.ui.warn)
683 self.invalidate()
683 self.invalidate()
684 return True
684 return True
685 else:
685 else:
686 self.ui.warn(_("no interrupted transaction available\n"))
686 self.ui.warn(_("no interrupted transaction available\n"))
687 return False
687 return False
688 finally:
688 finally:
689 lock.release()
689 lock.release()
690
690
691 def rollback(self, dryrun=False):
691 def rollback(self, dryrun=False):
692 wlock = lock = None
692 wlock = lock = None
693 try:
693 try:
694 wlock = self.wlock()
694 wlock = self.wlock()
695 lock = self.lock()
695 lock = self.lock()
696 if os.path.exists(self.sjoin("undo")):
696 if os.path.exists(self.sjoin("undo")):
697 try:
697 try:
698 args = self.opener("undo.desc", "r").read().splitlines()
698 args = self.opener("undo.desc", "r").read().splitlines()
699 if len(args) >= 3 and self.ui.verbose:
699 if len(args) >= 3 and self.ui.verbose:
700 desc = _("rolling back to revision %s"
700 desc = _("rolling back to revision %s"
701 " (undo %s: %s)\n") % (
701 " (undo %s: %s)\n") % (
702 int(args[0]) - 1, args[1], args[2])
702 int(args[0]) - 1, args[1], args[2])
703 elif len(args) >= 2:
703 elif len(args) >= 2:
704 desc = _("rolling back to revision %s (undo %s)\n") % (
704 desc = _("rolling back to revision %s (undo %s)\n") % (
705 int(args[0]) - 1, args[1])
705 int(args[0]) - 1, args[1])
706 except IOError:
706 except IOError:
707 desc = _("rolling back unknown transaction\n")
707 desc = _("rolling back unknown transaction\n")
708 self.ui.status(desc)
708 self.ui.status(desc)
709 if dryrun:
709 if dryrun:
710 return
710 return
711 transaction.rollback(self.sopener, self.sjoin("undo"),
711 transaction.rollback(self.sopener, self.sjoin("undo"),
712 self.ui.warn)
712 self.ui.warn)
713 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
713 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
714 try:
714 try:
715 branch = self.opener("undo.branch").read()
715 branch = self.opener("undo.branch").read()
716 self.dirstate.setbranch(branch)
716 self.dirstate.setbranch(branch)
717 except IOError:
717 except IOError:
718 self.ui.warn(_("Named branch could not be reset, "
718 self.ui.warn(_("Named branch could not be reset, "
719 "current branch still is: %s\n")
719 "current branch still is: %s\n")
720 % encoding.tolocal(self.dirstate.branch()))
720 % encoding.tolocal(self.dirstate.branch()))
721 self.invalidate()
721 self.invalidate()
722 self.dirstate.invalidate()
722 self.dirstate.invalidate()
723 self.destroyed()
723 self.destroyed()
724 else:
724 else:
725 self.ui.warn(_("no rollback information available\n"))
725 self.ui.warn(_("no rollback information available\n"))
726 return 1
726 return 1
727 finally:
727 finally:
728 release(lock, wlock)
728 release(lock, wlock)
729
729
730 def invalidatecaches(self):
730 def invalidatecaches(self):
731 self._tags = None
731 self._tags = None
732 self._tagtypes = None
732 self._tagtypes = None
733 self.nodetagscache = None
733 self.nodetagscache = None
734 self._branchcache = None # in UTF-8
734 self._branchcache = None # in UTF-8
735 self._branchcachetip = None
735 self._branchcachetip = None
736
736
737 def invalidate(self):
737 def invalidate(self):
738 for a in "changelog manifest".split():
738 for a in "changelog manifest".split():
739 if a in self.__dict__:
739 if a in self.__dict__:
740 delattr(self, a)
740 delattr(self, a)
741 self.invalidatecaches()
741 self.invalidatecaches()
742
742
743 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
743 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
744 try:
744 try:
745 l = lock.lock(lockname, 0, releasefn, desc=desc)
745 l = lock.lock(lockname, 0, releasefn, desc=desc)
746 except error.LockHeld, inst:
746 except error.LockHeld, inst:
747 if not wait:
747 if not wait:
748 raise
748 raise
749 self.ui.warn(_("waiting for lock on %s held by %r\n") %
749 self.ui.warn(_("waiting for lock on %s held by %r\n") %
750 (desc, inst.locker))
750 (desc, inst.locker))
751 # default to 600 seconds timeout
751 # default to 600 seconds timeout
752 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
752 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
753 releasefn, desc=desc)
753 releasefn, desc=desc)
754 if acquirefn:
754 if acquirefn:
755 acquirefn()
755 acquirefn()
756 return l
756 return l
757
757
758 def lock(self, wait=True):
758 def lock(self, wait=True):
759 '''Lock the repository store (.hg/store) and return a weak reference
759 '''Lock the repository store (.hg/store) and return a weak reference
760 to the lock. Use this before modifying the store (e.g. committing or
760 to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

856 # is the file changed?
856 # is the file changed?
857 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
857 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
858 changelist.append(fname)
858 changelist.append(fname)
859 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
859 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
860
860
861 # are just the flags changed during merge?
861 # are just the flags changed during merge?
862 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
862 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
863 changelist.append(fname)
863 changelist.append(fname)
864
864
865 return fparent1
865 return fparent1
866
866
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

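    # Example (a minimal sketch, not part of the upstream source):
    # committing a subset of the working directory through a match object.
    # The matchmod.match(root, cwd, patterns) call is assumed to have the
    # signature of this era.
    #
    #     m = matchmod.match(repo.root, '', ['foo.txt', 'bar/'])
    #     node = repo.commit(text="fix foo", user="alice", match=m)
    #     if node is None:
    #         repo.ui.status("nothing changed\n")
    #
    # commit() returns the new changeset node, or None when nothing needs
    # committing on the current branch.
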
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

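    # Example (a hedged sketch, not part of the upstream source):
    # commitctx() is the lower half of commit(); a caller that already has
    # revision data in memory can hand it a synthetic context. The
    # context.memctx/memfilectx signatures of this era are assumed here.
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(path, 'new contents\n')
    #
    #     mctx = context.memctx(repo, (repo['.'].node(), None),
    #                           "synthetic commit", ['a.txt'], getfilectx,
    #                           user="bot", date="0 0")
    #     node = repo.commitctx(mctx)
    #
    # The whole pipeline (filelogs, then the manifest, then the changelog
    # entry) runs under the store lock in a single transaction.
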
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

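    # Example (illustrative sketch, not part of the upstream source): the
    # seven status lists always come back in the same order, so callers
    # unpack the tuple positionally.
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(clean=True)
    #     for f in modified:
    #         repo.ui.write("M %s\n" % f)
    #
    # unknown, ignored and clean stay empty unless the matching keyword
    # argument is passed as True.
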
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

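    # Example (a small sketch, not part of the upstream source), assuming
    # a named branch 'stable' exists in the repository:
    #
    #     tips = repo.branchheads('stable')              # open heads only
    #     alltips = repo.branchheads('stable', closed=True)
    #     if len(tips) > 1:
    #         repo.ui.warn("branch 'stable' has multiple heads\n")
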
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

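    # Worked example (not part of the upstream source): between() backs
    # the wire-protocol discovery. For each (top, bottom) pair it walks
    # first parents from top and keeps the nodes sitting 1, 2, 4, 8, ...
    # steps away (i == f, with f doubling), a logarithmic skip list the
    # client can search. For a linear history of revs 0..9:
    #
    #     repo.between([(repo[9].node(), repo[0].node())])
    #     # -> [[node of rev 8, node of rev 7, node of rev 5, node of rev 1]]
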
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

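    # Example (hedged sketch, not part of the upstream source): pull()
    # only needs a peer exposing capable()/changegroup()/
    # changegroupsubset(); hg.repository is assumed here as the usual way
    # to obtain one.
    #
    #     other = hg.repository(ui, 'http://example.com/repo')
    #     r = repo.pull(other)
    #     # r == 0 when no changes were found, otherwise the
    #     # addchangegroup() return value (see below).
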
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

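    # Example (sketch, not part of the upstream source) of interpreting
    # the documented return codes:
    #
    #     r = repo.push(other)
    #     if r == 0:
    #         ui.warn("push failed or nothing to push\n")
    #     elif r == 1:
    #         ui.status("pushed; head count unchanged (or push refused)\n")
    #     else:
    #         # addchangegroup() encoding: 1 + added heads, -1 - removed
    #         ui.status("remote head count changed\n")
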
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

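    # Illustration (not part of the upstream source) of the extranodes
    # shape described in the docstring above: filenames (or the integer 1
    # for the manifest) map to lists of (node, linknode) pairs.
    #
    #     extranodes = {
    #         1: [(manifestnode, changelognode)],
    #         'foo.txt': [(filenode, changelognode)],
    #     }
    #     cg = repo.changegroupsubset(bases, heads, 'push',
    #                                 extranodes=extranodes)
    #     chunk = cg.read(4096)   # successive changegroup chunks
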
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1652 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1652 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1653 """Add the changegroup returned by source.read() to this repo.
1653 """Add the changegroup returned by source.read() to this repo.
1654 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1654 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1655 the URL of the repo where this changegroup is coming from.
1655 the URL of the repo where this changegroup is coming from.
1656
1656
1657 Return an integer summarizing the change to this repo:
1657 Return an integer summarizing the change to this repo:
1658 - nothing changed or no source: 0
1658 - nothing changed or no source: 0
1659 - more heads than before: 1+added heads (2..n)
1659 - more heads than before: 1+added heads (2..n)
1660 - fewer heads than before: -1-removed heads (-2..-n)
1660 - fewer heads than before: -1-removed heads (-2..-n)
1661 - number of heads stays the same: 1
1661 - number of heads stays the same: 1
1662 """
1662 """
1663 def csmap(x):
1663 def csmap(x):
1664 self.ui.debug("add changeset %s\n" % short(x))
1664 self.ui.debug("add changeset %s\n" % short(x))
1665 return len(cl)
1665 return len(cl)
1666
1666
1667 def revmap(x):
1667 def revmap(x):
1668 return cl.rev(x)
1668 return cl.rev(x)
1669
1669
1670 if not source:
1670 if not source:
1671 return 0
1671 return 0
1672
1672
1673 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1673 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1674
1674
1675 changesets = files = revisions = 0
1675 changesets = files = revisions = 0
1676 efiles = set()
1676 efiles = set()
1677
1677
1678 # write changelog data to temp files so concurrent readers will not see
1678 # write changelog data to temp files so concurrent readers will not see
1679 # inconsistent view
1679 # inconsistent view
1680 cl = self.changelog
1680 cl = self.changelog
1681 cl.delayupdate()
1681 cl.delayupdate()
1682 oldheads = len(cl.heads())
1682 oldheads = len(cl.heads())
1683
1683
1684 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1684 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1685 try:
1685 try:
1686 trp = weakref.proxy(tr)
1686 trp = weakref.proxy(tr)
1687 # pull off the changeset group
1687 # pull off the changeset group
1688 self.ui.status(_("adding changesets\n"))
1688 self.ui.status(_("adding changesets\n"))
1689 clstart = len(cl)
1689 clstart = len(cl)
1690 class prog(object):
1690 class prog(object):
1691 step = _('changesets')
1691 step = _('changesets')
1692 count = 1
1692 count = 1
1693 ui = self.ui
1693 ui = self.ui
1694 total = None
1694 total = None
1695 def __call__(self):
1695 def __call__(self):
1696 self.ui.progress(self.step, self.count, unit=_('chunks'),
1696 self.ui.progress(self.step, self.count, unit=_('chunks'),
1697 total=self.total)
1697 total=self.total)
1698 self.count += 1
1698 self.count += 1
1699 pr = prog()
1699 pr = prog()
1700 source.callback = pr
1700 source.callback = pr
1701
1701
1702 if (cl.addgroup(source, csmap, trp) is None
1702 if (cl.addgroup(source, csmap, trp) is None
1703 and not emptyok):
1703 and not emptyok):
1704 raise util.Abort(_("received changelog group is empty"))
1704 raise util.Abort(_("received changelog group is empty"))
1705 clend = len(cl)
1705 clend = len(cl)
1706 changesets = clend - clstart
1706 changesets = clend - clstart
1707 for c in xrange(clstart, clend):
1707 for c in xrange(clstart, clend):
1708 efiles.update(self[c].files())
1708 efiles.update(self[c].files())
1709 efiles = len(efiles)
1709 efiles = len(efiles)
1710 self.ui.progress(_('changesets'), None)
1710 self.ui.progress(_('changesets'), None)
1711
1711
1712 # pull off the manifest group
1712 # pull off the manifest group
1713 self.ui.status(_("adding manifests\n"))
1713 self.ui.status(_("adding manifests\n"))
1714 pr.step = _('manifests')
1714 pr.step = _('manifests')
1715 pr.count = 1
1715 pr.count = 1
1716 pr.total = changesets # manifests <= changesets
1716 pr.total = changesets # manifests <= changesets
1717 # no need to check for empty manifest group here:
1717 # no need to check for empty manifest group here:
1718 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1718 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1719 # no new manifest will be created and the manifest group will
1719 # no new manifest will be created and the manifest group will
1720 # be empty during the pull
1720 # be empty during the pull
1721 self.manifest.addgroup(source, revmap, trp)
1721 self.manifest.addgroup(source, revmap, trp)
1722 self.ui.progress(_('manifests'), None)
1722 self.ui.progress(_('manifests'), None)
1723
1723
1724 needfiles = {}
1724 needfiles = {}
1725 if self.ui.configbool('server', 'validate', default=False):
1725 if self.ui.configbool('server', 'validate', default=False):
1726 # validate incoming csets have their manifests
1726 # validate incoming csets have their manifests
1727 for cset in xrange(clstart, clend):
1727 for cset in xrange(clstart, clend):
1728 mfest = self.changelog.read(self.changelog.node(cset))[0]
1728 mfest = self.changelog.read(self.changelog.node(cset))[0]
1729 mfest = self.manifest.readdelta(mfest)
1729 mfest = self.manifest.readdelta(mfest)
1730 # store file nodes we must see
1730 # store file nodes we must see
1731 for f, n in mfest.iteritems():
1731 for f, n in mfest.iteritems():
1732 needfiles.setdefault(f, set()).add(n)
1732 needfiles.setdefault(f, set()).add(n)
1733
1733
1734 # process the files
1734 # process the files
1735 self.ui.status(_("adding file changes\n"))
1735 self.ui.status(_("adding file changes\n"))
1736 pr.step = 'files'
1736 pr.step = 'files'
1737 pr.count = 1
1737 pr.count = 1
1738 pr.total = efiles
1738 pr.total = efiles
1739 source.callback = None
1739 source.callback = None
1740
1740
            while True:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

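            # cross-check: every file node announced by a validated manifest
            # must now be present in its filelog, whether it arrived in this
            # changegroup or in an earlier one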
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
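                # writepending() dumps the uncommitted changelog entries to a
                # pending file and returns True, so p() evaluates to the repo
                # root; this is how pretxnchangegroup hooks get to see the
                # incoming changesets before the transaction commits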
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
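        # the offset keeps the result nonzero even when the head count is
        # unchanged: positive means heads were added (or stayed the same),
        # negative means heads were removed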
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote, requirements):
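        # the stream_out response, as consumed below, is line-oriented:
        #   <status>\n                   (0 ok, 1 forbidden, 2 lock failed)
        #   <total files> <total bytes>\n
        # then, for each file:
        #   <store path>\0<size>\n
        # followed by exactly <size> raw bytes of revlog data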
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
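        # the +1 presumably mirrors addchangegroup's never-return-zero
        # convention, so callers can treat a zero result as "nothing done"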
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
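
        # for example (capability values illustrative): a server advertising
        # the bare 'stream' capability serves plain revlogv1, while one
        # advertising 'streamreqs=revlogv1,parentdelta' may only be streamed
        # from if every listed format is locally supported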

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
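
# a sketch of the intended use (names illustrative): the closure is handed
# to a transaction as its after-close callback, so the journal renames run
# once the transaction is closed:
#
#   tr = transaction.transaction(ui.warn, opener, journal,
#                                aftertrans(renames))
#
# copying the pairs into renamefiles up front keeps the closure free of
# references back to the repository, so destructors can still run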

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True