addchangegroup: document the current locking semantics
Benoit Boissinot
r13271:952baa2f default
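The lock() and wlock() docstrings in the diff below describe what each lock protects. As an illustrative sketch only (not part of this changeset; `repo` stands for any localrepository instance), the acquisition order used elsewhere in this file, for example in rollback(), is working-directory lock first, then store lock, released in reverse order via lock.release():

    wlock = lock = None
    try:
        wlock = repo.wlock()  # guards .hg/ outside the store (dirstate, branch, ...)
        lock = repo.lock()    # guards .hg/store; take it before opening a transaction
        # ... modify the repository here ...
    finally:
        release(lock, wlock)  # release in reverse acquisition order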
@@ -1,1935 +1,1937 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False


    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
969 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
969 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
970 try:
970 try:
971 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
971 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
972 ret = self.commitctx(cctx, True)
972 ret = self.commitctx(cctx, True)
973 except:
973 except:
974 if edited:
974 if edited:
975 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
975 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
976 self.ui.write(
976 self.ui.write(
977 _('note: commit message saved in %s\n') % msgfn)
977 _('note: commit message saved in %s\n') % msgfn)
978 raise
978 raise
979
979
980 # update dirstate and mergestate
980 # update dirstate and mergestate
981 for f in changes[0] + changes[1]:
981 for f in changes[0] + changes[1]:
982 self.dirstate.normal(f)
982 self.dirstate.normal(f)
983 for f in changes[2]:
983 for f in changes[2]:
984 self.dirstate.forget(f)
984 self.dirstate.forget(f)
985 self.dirstate.setparents(ret)
985 self.dirstate.setparents(ret)
986 ms.reset()
986 ms.reset()
987 finally:
987 finally:
988 wlock.release()
988 wlock.release()
989
989
990 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
990 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
991 return ret
991 return ret
992
992
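The commit() docstring above describes the working-directory commit entry point. A minimal usage sketch follows; it is not part of localrepo.py, and the repository path, file pattern, and message are illustrative assumptions only.

    from mercurial import ui as uimod, hg
    from mercurial import match as matchmod

    def example_commit(path='/tmp/repo'):
        u = uimod.ui()
        repo = hg.repository(u, path)
        # restrict the commit to a single file, mirroring the `match` argument
        m = matchmod.match(repo.root, repo.getcwd(), ['data/notes.txt'])
        # returns None when nothing changed, else the new changelog node
        return repo.commit(text='update notes',
                           user='editor <editor@example.com>', match=m)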
993 def commitctx(self, ctx, error=False):
993 def commitctx(self, ctx, error=False):
994 """Add a new revision to current repository.
994 """Add a new revision to current repository.
995 Revision information is passed via the context argument.
995 Revision information is passed via the context argument.
996 """
996 """
997
997
998 tr = lock = None
998 tr = lock = None
999 removed = list(ctx.removed())
999 removed = list(ctx.removed())
1000 p1, p2 = ctx.p1(), ctx.p2()
1000 p1, p2 = ctx.p1(), ctx.p2()
1001 m1 = p1.manifest().copy()
1001 m1 = p1.manifest().copy()
1002 m2 = p2.manifest()
1002 m2 = p2.manifest()
1003 user = ctx.user()
1003 user = ctx.user()
1004
1004
1005 lock = self.lock()
1005 lock = self.lock()
1006 try:
1006 try:
1007 tr = self.transaction("commit")
1007 tr = self.transaction("commit")
1008 trp = weakref.proxy(tr)
1008 trp = weakref.proxy(tr)
1009
1009
1010 # check in files
1010 # check in files
1011 new = {}
1011 new = {}
1012 changed = []
1012 changed = []
1013 linkrev = len(self)
1013 linkrev = len(self)
1014 for f in sorted(ctx.modified() + ctx.added()):
1014 for f in sorted(ctx.modified() + ctx.added()):
1015 self.ui.note(f + "\n")
1015 self.ui.note(f + "\n")
1016 try:
1016 try:
1017 fctx = ctx[f]
1017 fctx = ctx[f]
1018 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1018 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1019 changed)
1019 changed)
1020 m1.set(f, fctx.flags())
1020 m1.set(f, fctx.flags())
1021 except OSError, inst:
1021 except OSError, inst:
1022 self.ui.warn(_("trouble committing %s!\n") % f)
1022 self.ui.warn(_("trouble committing %s!\n") % f)
1023 raise
1023 raise
1024 except IOError, inst:
1024 except IOError, inst:
1025 errcode = getattr(inst, 'errno', errno.ENOENT)
1025 errcode = getattr(inst, 'errno', errno.ENOENT)
1026 if error or errcode and errcode != errno.ENOENT:
1026 if error or errcode and errcode != errno.ENOENT:
1027 self.ui.warn(_("trouble committing %s!\n") % f)
1027 self.ui.warn(_("trouble committing %s!\n") % f)
1028 raise
1028 raise
1029 else:
1029 else:
1030 removed.append(f)
1030 removed.append(f)
1031
1031
1032 # update manifest
1032 # update manifest
1033 m1.update(new)
1033 m1.update(new)
1034 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1034 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1035 drop = [f for f in removed if f in m1]
1035 drop = [f for f in removed if f in m1]
1036 for f in drop:
1036 for f in drop:
1037 del m1[f]
1037 del m1[f]
1038 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1038 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1039 p2.manifestnode(), (new, drop))
1039 p2.manifestnode(), (new, drop))
1040
1040
1041 # update changelog
1041 # update changelog
1042 self.changelog.delayupdate()
1042 self.changelog.delayupdate()
1043 n = self.changelog.add(mn, changed + removed, ctx.description(),
1043 n = self.changelog.add(mn, changed + removed, ctx.description(),
1044 trp, p1.node(), p2.node(),
1044 trp, p1.node(), p2.node(),
1045 user, ctx.date(), ctx.extra().copy())
1045 user, ctx.date(), ctx.extra().copy())
1046 p = lambda: self.changelog.writepending() and self.root or ""
1046 p = lambda: self.changelog.writepending() and self.root or ""
1047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1049 parent2=xp2, pending=p)
1049 parent2=xp2, pending=p)
1050 self.changelog.finalize(trp)
1050 self.changelog.finalize(trp)
1051 tr.close()
1051 tr.close()
1052
1052
1053 if self._branchcache:
1053 if self._branchcache:
1054 self.updatebranchcache()
1054 self.updatebranchcache()
1055 return n
1055 return n
1056 finally:
1056 finally:
1057 if tr:
1057 if tr:
1058 tr.release()
1058 tr.release()
1059 lock.release()
1059 lock.release()
1060
1060
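Unlike commit(), commitctx() above takes a prepared context instead of reading the working directory. Below is a hedged sketch of driving it with an in-memory commit; the memctx/memfilectx signatures reflect Mercurial releases of this vintage, and the file name, contents, and message are assumptions.

    from mercurial import context

    def example_commitctx(repo):
        def getfilectx(repo, memctx, path):
            # supply the new contents for each path listed in the memctx
            return context.memfilectx(path, 'generated contents\n')

        mctx = context.memctx(repo, (repo['.'].node(), None),
                              'automated commit', ['generated.txt'],
                              getfilectx,
                              user='editor <editor@example.com>')
        return repo.commitctx(mctx)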
1061 def destroyed(self):
1061 def destroyed(self):
1062 '''Inform the repository that nodes have been destroyed.
1062 '''Inform the repository that nodes have been destroyed.
1063 Intended for use by strip and rollback, so there's a common
1063 Intended for use by strip and rollback, so there's a common
1064 place for anything that has to be done after destroying history.'''
1064 place for anything that has to be done after destroying history.'''
1065 # XXX it might be nice if we could take the list of destroyed
1065 # XXX it might be nice if we could take the list of destroyed
1066 # nodes, but I don't see an easy way for rollback() to do that
1066 # nodes, but I don't see an easy way for rollback() to do that
1067
1067
1068 # Ensure the persistent tag cache is updated. Doing it now
1068 # Ensure the persistent tag cache is updated. Doing it now
1069 # means that the tag cache only has to worry about destroyed
1069 # means that the tag cache only has to worry about destroyed
1070 # heads immediately after a strip/rollback. That in turn
1070 # heads immediately after a strip/rollback. That in turn
1071 # guarantees that "cachetip == currenttip" (comparing both rev
1071 # guarantees that "cachetip == currenttip" (comparing both rev
1072 # and node) always means no nodes have been added or destroyed.
1072 # and node) always means no nodes have been added or destroyed.
1073
1073
1074 # XXX this is suboptimal when qrefresh'ing: we strip the current
1074 # XXX this is suboptimal when qrefresh'ing: we strip the current
1075 # head, refresh the tag cache, then immediately add a new head.
1075 # head, refresh the tag cache, then immediately add a new head.
1076 # But I think doing it this way is necessary for the "instant
1076 # But I think doing it this way is necessary for the "instant
1077 # tag cache retrieval" case to work.
1077 # tag cache retrieval" case to work.
1078 self.invalidatecaches()
1078 self.invalidatecaches()
1079
1079
1080 def walk(self, match, node=None):
1080 def walk(self, match, node=None):
1081 '''
1081 '''
1082 walk recursively through the directory tree or a given
1082 walk recursively through the directory tree or a given
1083 changeset, finding all files matched by the match
1083 changeset, finding all files matched by the match
1084 function
1084 function
1085 '''
1085 '''
1086 return self[node].walk(match)
1086 return self[node].walk(match)
1087
1087
1088 def status(self, node1='.', node2=None, match=None,
1088 def status(self, node1='.', node2=None, match=None,
1089 ignored=False, clean=False, unknown=False,
1089 ignored=False, clean=False, unknown=False,
1090 listsubrepos=False):
1090 listsubrepos=False):
1091 """return status of files between two nodes or node and working directory
1091 """return status of files between two nodes or node and working directory
1092
1092
1093 If node1 is None, use the first dirstate parent instead.
1093 If node1 is None, use the first dirstate parent instead.
1094 If node2 is None, compare node1 with working directory.
1094 If node2 is None, compare node1 with working directory.
1095 """
1095 """
1096
1096
1097 def mfmatches(ctx):
1097 def mfmatches(ctx):
1098 mf = ctx.manifest().copy()
1098 mf = ctx.manifest().copy()
1099 for fn in mf.keys():
1099 for fn in mf.keys():
1100 if not match(fn):
1100 if not match(fn):
1101 del mf[fn]
1101 del mf[fn]
1102 return mf
1102 return mf
1103
1103
1104 if isinstance(node1, context.changectx):
1104 if isinstance(node1, context.changectx):
1105 ctx1 = node1
1105 ctx1 = node1
1106 else:
1106 else:
1107 ctx1 = self[node1]
1107 ctx1 = self[node1]
1108 if isinstance(node2, context.changectx):
1108 if isinstance(node2, context.changectx):
1109 ctx2 = node2
1109 ctx2 = node2
1110 else:
1110 else:
1111 ctx2 = self[node2]
1111 ctx2 = self[node2]
1112
1112
1113 working = ctx2.rev() is None
1113 working = ctx2.rev() is None
1114 parentworking = working and ctx1 == self['.']
1114 parentworking = working and ctx1 == self['.']
1115 match = match or matchmod.always(self.root, self.getcwd())
1115 match = match or matchmod.always(self.root, self.getcwd())
1116 listignored, listclean, listunknown = ignored, clean, unknown
1116 listignored, listclean, listunknown = ignored, clean, unknown
1117
1117
1118 # load earliest manifest first for caching reasons
1118 # load earliest manifest first for caching reasons
1119 if not working and ctx2.rev() < ctx1.rev():
1119 if not working and ctx2.rev() < ctx1.rev():
1120 ctx2.manifest()
1120 ctx2.manifest()
1121
1121
1122 if not parentworking:
1122 if not parentworking:
1123 def bad(f, msg):
1123 def bad(f, msg):
1124 if f not in ctx1:
1124 if f not in ctx1:
1125 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1125 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1126 match.bad = bad
1126 match.bad = bad
1127
1127
1128 if working: # we need to scan the working dir
1128 if working: # we need to scan the working dir
1129 subrepos = []
1129 subrepos = []
1130 if '.hgsub' in self.dirstate:
1130 if '.hgsub' in self.dirstate:
1131 subrepos = ctx1.substate.keys()
1131 subrepos = ctx1.substate.keys()
1132 s = self.dirstate.status(match, subrepos, listignored,
1132 s = self.dirstate.status(match, subrepos, listignored,
1133 listclean, listunknown)
1133 listclean, listunknown)
1134 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1134 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1135
1135
1136 # check for any possibly clean files
1136 # check for any possibly clean files
1137 if parentworking and cmp:
1137 if parentworking and cmp:
1138 fixup = []
1138 fixup = []
1139 # do a full compare of any files that might have changed
1139 # do a full compare of any files that might have changed
1140 for f in sorted(cmp):
1140 for f in sorted(cmp):
1141 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1141 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1142 or ctx1[f].cmp(ctx2[f])):
1142 or ctx1[f].cmp(ctx2[f])):
1143 modified.append(f)
1143 modified.append(f)
1144 else:
1144 else:
1145 fixup.append(f)
1145 fixup.append(f)
1146
1146
1147 # update dirstate for files that are actually clean
1147 # update dirstate for files that are actually clean
1148 if fixup:
1148 if fixup:
1149 if listclean:
1149 if listclean:
1150 clean += fixup
1150 clean += fixup
1151
1151
1152 try:
1152 try:
1153 # updating the dirstate is optional
1153 # updating the dirstate is optional
1154 # so we don't wait on the lock
1154 # so we don't wait on the lock
1155 wlock = self.wlock(False)
1155 wlock = self.wlock(False)
1156 try:
1156 try:
1157 for f in fixup:
1157 for f in fixup:
1158 self.dirstate.normal(f)
1158 self.dirstate.normal(f)
1159 finally:
1159 finally:
1160 wlock.release()
1160 wlock.release()
1161 except error.LockError:
1161 except error.LockError:
1162 pass
1162 pass
1163
1163
1164 if not parentworking:
1164 if not parentworking:
1165 mf1 = mfmatches(ctx1)
1165 mf1 = mfmatches(ctx1)
1166 if working:
1166 if working:
1167 # we are comparing working dir against non-parent
1167 # we are comparing working dir against non-parent
1168 # generate a pseudo-manifest for the working dir
1168 # generate a pseudo-manifest for the working dir
1169 mf2 = mfmatches(self['.'])
1169 mf2 = mfmatches(self['.'])
1170 for f in cmp + modified + added:
1170 for f in cmp + modified + added:
1171 mf2[f] = None
1171 mf2[f] = None
1172 mf2.set(f, ctx2.flags(f))
1172 mf2.set(f, ctx2.flags(f))
1173 for f in removed:
1173 for f in removed:
1174 if f in mf2:
1174 if f in mf2:
1175 del mf2[f]
1175 del mf2[f]
1176 else:
1176 else:
1177 # we are comparing two revisions
1177 # we are comparing two revisions
1178 deleted, unknown, ignored = [], [], []
1178 deleted, unknown, ignored = [], [], []
1179 mf2 = mfmatches(ctx2)
1179 mf2 = mfmatches(ctx2)
1180
1180
1181 modified, added, clean = [], [], []
1181 modified, added, clean = [], [], []
1182 for fn in mf2:
1182 for fn in mf2:
1183 if fn in mf1:
1183 if fn in mf1:
1184 if (mf1.flags(fn) != mf2.flags(fn) or
1184 if (mf1.flags(fn) != mf2.flags(fn) or
1185 (mf1[fn] != mf2[fn] and
1185 (mf1[fn] != mf2[fn] and
1186 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1186 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1187 modified.append(fn)
1187 modified.append(fn)
1188 elif listclean:
1188 elif listclean:
1189 clean.append(fn)
1189 clean.append(fn)
1190 del mf1[fn]
1190 del mf1[fn]
1191 else:
1191 else:
1192 added.append(fn)
1192 added.append(fn)
1193 removed = mf1.keys()
1193 removed = mf1.keys()
1194
1194
1195 r = modified, added, removed, deleted, unknown, ignored, clean
1195 r = modified, added, removed, deleted, unknown, ignored, clean
1196
1196
1197 if listsubrepos:
1197 if listsubrepos:
1198 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1198 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1199 if working:
1199 if working:
1200 rev2 = None
1200 rev2 = None
1201 else:
1201 else:
1202 rev2 = ctx2.substate[subpath][1]
1202 rev2 = ctx2.substate[subpath][1]
1203 try:
1203 try:
1204 submatch = matchmod.narrowmatcher(subpath, match)
1204 submatch = matchmod.narrowmatcher(subpath, match)
1205 s = sub.status(rev2, match=submatch, ignored=listignored,
1205 s = sub.status(rev2, match=submatch, ignored=listignored,
1206 clean=listclean, unknown=listunknown,
1206 clean=listclean, unknown=listunknown,
1207 listsubrepos=True)
1207 listsubrepos=True)
1208 for rfiles, sfiles in zip(r, s):
1208 for rfiles, sfiles in zip(r, s):
1209 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1209 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1210 except error.LookupError:
1210 except error.LookupError:
1211 self.ui.status(_("skipping missing subrepository: %s\n")
1211 self.ui.status(_("skipping missing subrepository: %s\n")
1212 % subpath)
1212 % subpath)
1213
1213
1214 [l.sort() for l in r]
1214 [l.sort() for l in r]
1215 return r
1215 return r
1216
1216
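The status() docstring above fixes the order of the returned file lists; a short consumer sketch follows (the keyword flags are needed if the ignored, unknown, and clean lists should actually be populated):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, unknown=True, clean=True)
    for f in modified:
        repo.ui.write('M %s\n' % f)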
1217 def heads(self, start=None):
1217 def heads(self, start=None):
1218 heads = self.changelog.heads(start)
1218 heads = self.changelog.heads(start)
1219 # sort the output in rev descending order
1219 # sort the output in rev descending order
1220 return sorted(heads, key=self.changelog.rev, reverse=True)
1220 return sorted(heads, key=self.changelog.rev, reverse=True)
1221
1221
1222 def branchheads(self, branch=None, start=None, closed=False):
1222 def branchheads(self, branch=None, start=None, closed=False):
1223 '''return a (possibly filtered) list of heads for the given branch
1223 '''return a (possibly filtered) list of heads for the given branch
1224
1224
1225 Heads are returned in topological order, from newest to oldest.
1225 Heads are returned in topological order, from newest to oldest.
1226 If branch is None, use the dirstate branch.
1226 If branch is None, use the dirstate branch.
1227 If start is not None, return only heads reachable from start.
1227 If start is not None, return only heads reachable from start.
1228 If closed is True, return heads that are marked as closed as well.
1228 If closed is True, return heads that are marked as closed as well.
1229 '''
1229 '''
1230 if branch is None:
1230 if branch is None:
1231 branch = self[None].branch()
1231 branch = self[None].branch()
1232 branches = self.branchmap()
1232 branches = self.branchmap()
1233 if branch not in branches:
1233 if branch not in branches:
1234 return []
1234 return []
1235 # the cache returns heads ordered lowest to highest
1235 # the cache returns heads ordered lowest to highest
1236 bheads = list(reversed(branches[branch]))
1236 bheads = list(reversed(branches[branch]))
1237 if start is not None:
1237 if start is not None:
1238 # filter out the heads that cannot be reached from startrev
1238 # filter out the heads that cannot be reached from startrev
1239 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1239 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1240 bheads = [h for h in bheads if h in fbheads]
1240 bheads = [h for h in bheads if h in fbheads]
1241 if not closed:
1241 if not closed:
1242 bheads = [h for h in bheads if
1242 bheads = [h for h in bheads if
1243 ('close' not in self.changelog.read(h)[5])]
1243 ('close' not in self.changelog.read(h)[5])]
1244 return bheads
1244 return bheads
1245
1245
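A small illustration of the branchheads() contract documented above; the branch name is an assumption:

    from mercurial.node import short
    # newest-first heads of the 'default' branch, including closed heads
    for head in repo.branchheads('default', closed=True):
        repo.ui.write('%s\n' % short(head))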
1246 def branches(self, nodes):
1246 def branches(self, nodes):
1247 if not nodes:
1247 if not nodes:
1248 nodes = [self.changelog.tip()]
1248 nodes = [self.changelog.tip()]
1249 b = []
1249 b = []
1250 for n in nodes:
1250 for n in nodes:
1251 t = n
1251 t = n
1252 while 1:
1252 while 1:
1253 p = self.changelog.parents(n)
1253 p = self.changelog.parents(n)
1254 if p[1] != nullid or p[0] == nullid:
1254 if p[1] != nullid or p[0] == nullid:
1255 b.append((t, n, p[0], p[1]))
1255 b.append((t, n, p[0], p[1]))
1256 break
1256 break
1257 n = p[0]
1257 n = p[0]
1258 return b
1258 return b
1259
1259
1260 def between(self, pairs):
1260 def between(self, pairs):
1261 r = []
1261 r = []
1262
1262
1263 for top, bottom in pairs:
1263 for top, bottom in pairs:
1264 n, l, i = top, [], 0
1264 n, l, i = top, [], 0
1265 f = 1
1265 f = 1
1266
1266
1267 while n != bottom and n != nullid:
1267 while n != bottom and n != nullid:
1268 p = self.changelog.parents(n)[0]
1268 p = self.changelog.parents(n)[0]
1269 if i == f:
1269 if i == f:
1270 l.append(n)
1270 l.append(n)
1271 f = f * 2
1271 f = f * 2
1272 n = p
1272 n = p
1273 i += 1
1273 i += 1
1274
1274
1275 r.append(l)
1275 r.append(l)
1276
1276
1277 return r
1277 return r
1278
1278
1279 def pull(self, remote, heads=None, force=False):
1279 def pull(self, remote, heads=None, force=False):
1280 lock = self.lock()
1280 lock = self.lock()
1281 try:
1281 try:
1282 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1282 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1283 force=force)
1283 force=force)
1284 common, fetch, rheads = tmp
1284 common, fetch, rheads = tmp
1285 if not fetch:
1285 if not fetch:
1286 self.ui.status(_("no changes found\n"))
1286 self.ui.status(_("no changes found\n"))
1287 return 0
1287 return 0
1288
1288
1289 if heads is None and fetch == [nullid]:
1289 if heads is None and fetch == [nullid]:
1290 self.ui.status(_("requesting all changes\n"))
1290 self.ui.status(_("requesting all changes\n"))
1291 elif heads is None and remote.capable('changegroupsubset'):
1291 elif heads is None and remote.capable('changegroupsubset'):
1292 # issue1320, avoid a race if remote changed after discovery
1292 # issue1320, avoid a race if remote changed after discovery
1293 heads = rheads
1293 heads = rheads
1294
1294
1295 if heads is None:
1295 if heads is None:
1296 cg = remote.changegroup(fetch, 'pull')
1296 cg = remote.changegroup(fetch, 'pull')
1297 else:
1297 else:
1298 if not remote.capable('changegroupsubset'):
1298 if not remote.capable('changegroupsubset'):
1299 raise util.Abort(_("partial pull cannot be done because "
1299 raise util.Abort(_("partial pull cannot be done because "
1300 "other repository doesn't support "
1300 "other repository doesn't support "
1301 "changegroupsubset."))
1301 "changegroupsubset."))
1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1303 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1303 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1304 finally:
1304 finally:
1305 lock.release()
1305 lock.release()
1306
1306
1307 def push(self, remote, force=False, revs=None, newbranch=False):
1307 def push(self, remote, force=False, revs=None, newbranch=False):
1308 '''Push outgoing changesets (limited by revs) from the current
1308 '''Push outgoing changesets (limited by revs) from the current
1309 repository to remote. Return an integer:
1309 repository to remote. Return an integer:
1310 - 0 means HTTP error *or* nothing to push
1310 - 0 means HTTP error *or* nothing to push
1311 - 1 means we pushed and remote head count is unchanged *or*
1311 - 1 means we pushed and remote head count is unchanged *or*
1312 we have outgoing changesets but refused to push
1312 we have outgoing changesets but refused to push
1313 - other values as described by addchangegroup()
1313 - other values as described by addchangegroup()
1314 '''
1314 '''
1315 # there are two ways to push to remote repo:
1315 # there are two ways to push to remote repo:
1316 #
1316 #
1317 # addchangegroup assumes local user can lock remote
1317 # addchangegroup assumes local user can lock remote
1318 # repo (local filesystem, old ssh servers).
1318 # repo (local filesystem, old ssh servers).
1319 #
1319 #
1320 # unbundle assumes local user cannot lock remote repo (new ssh
1320 # unbundle assumes local user cannot lock remote repo (new ssh
1321 # servers, http servers).
1321 # servers, http servers).
1322
1322
1323 lock = None
1323 lock = None
1324 unbundle = remote.capable('unbundle')
1324 unbundle = remote.capable('unbundle')
1325 if not unbundle:
1325 if not unbundle:
1326 lock = remote.lock()
1326 lock = remote.lock()
1327 try:
1327 try:
1328 ret = discovery.prepush(self, remote, force, revs, newbranch)
1328 ret = discovery.prepush(self, remote, force, revs, newbranch)
1329 if ret[0] is None:
1329 if ret[0] is None:
1330 # and here we return 0 for "nothing to push" or 1 for
1330 # and here we return 0 for "nothing to push" or 1 for
1331 # "something to push but I refuse"
1331 # "something to push but I refuse"
1332 return ret[1]
1332 return ret[1]
1333
1333
1334 cg, remote_heads = ret
1334 cg, remote_heads = ret
1335 if unbundle:
1335 if unbundle:
1336 # local repo finds heads on server, finds out what revs it must
1336 # local repo finds heads on server, finds out what revs it must
1337 # push. once revs transferred, if server finds it has
1337 # push. once revs transferred, if server finds it has
1338 # different heads (someone else won commit/push race), server
1338 # different heads (someone else won commit/push race), server
1339 # aborts.
1339 # aborts.
1340 if force:
1340 if force:
1341 remote_heads = ['force']
1341 remote_heads = ['force']
1342 # ssh: return remote's addchangegroup()
1342 # ssh: return remote's addchangegroup()
1343 # http: return remote's addchangegroup() or 0 for error
1343 # http: return remote's addchangegroup() or 0 for error
1344 return remote.unbundle(cg, remote_heads, 'push')
1344 return remote.unbundle(cg, remote_heads, 'push')
1345 else:
1345 else:
1346 # we return an integer indicating remote head count change
1346 # we return an integer indicating remote head count change
1347 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1347 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1348 finally:
1348 finally:
1349 if lock is not None:
1349 if lock is not None:
1350 lock.release()
1350 lock.release()
1351
1351
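Given the return-code convention spelled out in the push() docstring above, a caller might check the result as sketched here; the remote URL is an assumption:

    from mercurial import hg
    other = hg.repository(repo.ui, 'http://example.com/repo')
    result = repo.push(other, newbranch=True)
    if result == 0:
        repo.ui.warn('push failed or there was nothing to push\n')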
1352 def changegroupinfo(self, nodes, source):
1352 def changegroupinfo(self, nodes, source):
1353 if self.ui.verbose or source == 'bundle':
1353 if self.ui.verbose or source == 'bundle':
1354 self.ui.status(_("%d changesets found\n") % len(nodes))
1354 self.ui.status(_("%d changesets found\n") % len(nodes))
1355 if self.ui.debugflag:
1355 if self.ui.debugflag:
1356 self.ui.debug("list of changesets:\n")
1356 self.ui.debug("list of changesets:\n")
1357 for node in nodes:
1357 for node in nodes:
1358 self.ui.debug("%s\n" % hex(node))
1358 self.ui.debug("%s\n" % hex(node))
1359
1359
1360 def changegroupsubset(self, bases, heads, source, extranodes=None):
1360 def changegroupsubset(self, bases, heads, source, extranodes=None):
1361 """Compute a changegroup consisting of all the nodes that are
1361 """Compute a changegroup consisting of all the nodes that are
1362 descendants of any of the bases and ancestors of any of the heads.
1362 descendants of any of the bases and ancestors of any of the heads.
1363 Return a chunkbuffer object whose read() method will return
1363 Return a chunkbuffer object whose read() method will return
1364 successive changegroup chunks.
1364 successive changegroup chunks.
1365
1365
1366 It is fairly complex as determining which filenodes and which
1366 It is fairly complex as determining which filenodes and which
1367 manifest nodes need to be included for the changeset to be complete
1367 manifest nodes need to be included for the changeset to be complete
1368 is non-trivial.
1368 is non-trivial.
1369
1369
1370 Another wrinkle is doing the reverse, figuring out which changeset in
1370 Another wrinkle is doing the reverse, figuring out which changeset in
1371 the changegroup a particular filenode or manifestnode belongs to.
1371 the changegroup a particular filenode or manifestnode belongs to.
1372
1372
1373 The caller can specify some nodes that must be included in the
1373 The caller can specify some nodes that must be included in the
1374 changegroup using the extranodes argument. It should be a dict
1374 changegroup using the extranodes argument. It should be a dict
1375 where the keys are the filenames (or 1 for the manifest), and the
1375 where the keys are the filenames (or 1 for the manifest), and the
1376 values are lists of (node, linknode) tuples, where node is a wanted
1376 values are lists of (node, linknode) tuples, where node is a wanted
1377 node and linknode is the changelog node that should be transmitted as
1377 node and linknode is the changelog node that should be transmitted as
1378 the linkrev.
1378 the linkrev.
1379 """
1379 """
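        # Editor's note, not in the original source: the extranodes mapping
        # described in the docstring above has roughly this shape (names and
        # nodes are placeholders):
        #   {'path/to/file': [(filenode, linknode), ...],
        #    1:              [(manifestnode, linknode), ...]}
        # where each linknode is the changelog node to advertise as the
        # linkrev for that entry.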
1380
1380
1381 # Set up some initial variables
1381 # Set up some initial variables
1382 # Make it easy to refer to self.changelog
1382 # Make it easy to refer to self.changelog
1383 cl = self.changelog
1383 cl = self.changelog
1384 # Compute the list of changesets in this changegroup.
1384 # Compute the list of changesets in this changegroup.
1385 # Some bases may turn out to be superfluous, and some heads may be
1385 # Some bases may turn out to be superfluous, and some heads may be
1386 # too. nodesbetween will return the minimal set of bases and heads
1386 # too. nodesbetween will return the minimal set of bases and heads
1387 # necessary to re-create the changegroup.
1387 # necessary to re-create the changegroup.
1388 if not bases:
1388 if not bases:
1389 bases = [nullid]
1389 bases = [nullid]
1390 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1390 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1391
1391
1392 if extranodes is None:
1392 if extranodes is None:
1393 # can we go through the fast path?
1393 # can we go through the fast path?
1394 heads.sort()
1394 heads.sort()
1395 allheads = self.heads()
1395 allheads = self.heads()
1396 allheads.sort()
1396 allheads.sort()
1397 if heads == allheads:
1397 if heads == allheads:
1398 return self._changegroup(msng_cl_lst, source)
1398 return self._changegroup(msng_cl_lst, source)
1399
1399
1400 # slow path
1400 # slow path
1401 self.hook('preoutgoing', throw=True, source=source)
1401 self.hook('preoutgoing', throw=True, source=source)
1402
1402
1403 self.changegroupinfo(msng_cl_lst, source)
1403 self.changegroupinfo(msng_cl_lst, source)
1404
1404
1405 # We assume that all ancestors of bases are known
1405 # We assume that all ancestors of bases are known
1406 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1406 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1407
1407
1408 # Make it easy to refer to self.manifest
1408 # Make it easy to refer to self.manifest
1409 mnfst = self.manifest
1409 mnfst = self.manifest
1410 # We don't know which manifests are missing yet
1410 # We don't know which manifests are missing yet
1411 msng_mnfst_set = {}
1411 msng_mnfst_set = {}
1412 # Nor do we know which filenodes are missing.
1412 # Nor do we know which filenodes are missing.
1413 msng_filenode_set = {}
1413 msng_filenode_set = {}
1414
1414
1415 # A changeset always belongs to itself, so the changenode lookup
1415 # A changeset always belongs to itself, so the changenode lookup
1416 # function for a changenode is identity.
1416 # function for a changenode is identity.
1417 def identity(x):
1417 def identity(x):
1418 return x
1418 return x
1419
1419
1420 # A function-generating function that sets up the initial environment
1420 # A function-generating function that sets up the initial environment
1421 # for the inner function.
1421 # for the inner function.
1422 def filenode_collector(changedfiles):
1422 def filenode_collector(changedfiles):
1423 # This gathers information from each manifestnode included in the
1423 # This gathers information from each manifestnode included in the
1424 # changegroup about which filenodes the manifest node references
1424 # changegroup about which filenodes the manifest node references
1425 # so we can include those in the changegroup too.
1425 # so we can include those in the changegroup too.
1426 #
1426 #
1427 # It also remembers which changenode each filenode belongs to. It
1427 # It also remembers which changenode each filenode belongs to. It
1428 # does this by assuming that a filenode belongs to the changenode
1428 # does this by assuming that a filenode belongs to the changenode
1429 # the first manifest that references it belongs to.
1429 # the first manifest that references it belongs to.
1430 def collect_msng_filenodes(mnfstnode):
1430 def collect_msng_filenodes(mnfstnode):
1431 r = mnfst.rev(mnfstnode)
1431 r = mnfst.rev(mnfstnode)
1432 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1432 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1433 # If the previous rev is one of the parents,
1433 # If the previous rev is one of the parents,
1434 # we only need to see a diff.
1434 # we only need to see a diff.
1435 deltamf = mnfst.readdelta(mnfstnode)
1435 deltamf = mnfst.readdelta(mnfstnode)
1436 # For each line in the delta
1436 # For each line in the delta
1437 for f, fnode in deltamf.iteritems():
1437 for f, fnode in deltamf.iteritems():
1438 # And if the file is in the list of files we care
1438 # And if the file is in the list of files we care
1439 # about.
1439 # about.
1440 if f in changedfiles:
1440 if f in changedfiles:
1441 # Get the changenode this manifest belongs to
1441 # Get the changenode this manifest belongs to
1442 clnode = msng_mnfst_set[mnfstnode]
1442 clnode = msng_mnfst_set[mnfstnode]
1443 # Create the set of filenodes for the file if
1443 # Create the set of filenodes for the file if
1444 # there isn't one already.
1444 # there isn't one already.
1445 ndset = msng_filenode_set.setdefault(f, {})
1445 ndset = msng_filenode_set.setdefault(f, {})
1446 # And set the filenode's changelog node to the
1446 # And set the filenode's changelog node to the
1447 # manifest's if it hasn't been set already.
1447 # manifest's if it hasn't been set already.
1448 ndset.setdefault(fnode, clnode)
1448 ndset.setdefault(fnode, clnode)
1449 else:
1449 else:
1450 # Otherwise we need a full manifest.
1450 # Otherwise we need a full manifest.
1451 m = mnfst.read(mnfstnode)
1451 m = mnfst.read(mnfstnode)
1452 # For every file we care about.
1452 # For every file we care about.
1453 for f in changedfiles:
1453 for f in changedfiles:
1454 fnode = m.get(f, None)
1454 fnode = m.get(f, None)
1455 # If it's in the manifest
1455 # If it's in the manifest
1456 if fnode is not None:
1456 if fnode is not None:
1457 # See comments above.
1457 # See comments above.
1458 clnode = msng_mnfst_set[mnfstnode]
1458 clnode = msng_mnfst_set[mnfstnode]
1459 ndset = msng_filenode_set.setdefault(f, {})
1459 ndset = msng_filenode_set.setdefault(f, {})
1460 ndset.setdefault(fnode, clnode)
1460 ndset.setdefault(fnode, clnode)
1461 return collect_msng_filenodes
1461 return collect_msng_filenodes
1462
1462
1463 # If we determine that a particular file or manifest node must be a
1463 # If we determine that a particular file or manifest node must be a
1464 # node that the recipient of the changegroup will already have, we can
1464 # node that the recipient of the changegroup will already have, we can
1465 # also assume the recipient will have all the parents. This function
1465 # also assume the recipient will have all the parents. This function
1466 # prunes them from the set of missing nodes.
1466 # prunes them from the set of missing nodes.
1467 def prune(revlog, missingnodes):
1467 def prune(revlog, missingnodes):
1468 hasset = set()
1468 hasset = set()
1469 # If a 'missing' filenode thinks it belongs to a changenode we
1469 # If a 'missing' filenode thinks it belongs to a changenode we
1470 # assume the recipient must have, then the recipient must have
1470 # assume the recipient must have, then the recipient must have
1471 # that filenode.
1471 # that filenode.
1472 for n in missingnodes:
1472 for n in missingnodes:
1473 clrev = revlog.linkrev(revlog.rev(n))
1473 clrev = revlog.linkrev(revlog.rev(n))
1474 if clrev in commonrevs:
1474 if clrev in commonrevs:
1475 hasset.add(n)
1475 hasset.add(n)
1476 for n in hasset:
1476 for n in hasset:
1477 missingnodes.pop(n, None)
1477 missingnodes.pop(n, None)
1478 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1478 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1479 missingnodes.pop(revlog.node(r), None)
1479 missingnodes.pop(revlog.node(r), None)
1480
1480
1481 # Add the nodes that were explicitly requested.
1481 # Add the nodes that were explicitly requested.
1482 def add_extra_nodes(name, nodes):
1482 def add_extra_nodes(name, nodes):
1483 if not extranodes or name not in extranodes:
1483 if not extranodes or name not in extranodes:
1484 return
1484 return
1485
1485
1486 for node, linknode in extranodes[name]:
1486 for node, linknode in extranodes[name]:
1487 if node not in nodes:
1487 if node not in nodes:
1488 nodes[node] = linknode
1488 nodes[node] = linknode
1489
1489
1490 # Now that we have all these utility functions to help out and
1490 # Now that we have all these utility functions to help out and
1491 # logically divide up the task, generate the group.
1491 # logically divide up the task, generate the group.
1492 def gengroup():
1492 def gengroup():
1493 # The set of changed files starts empty.
1493 # The set of changed files starts empty.
1494 changedfiles = set()
1494 changedfiles = set()
1495 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1495 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1496
1496
1497 # Create a changenode group generator that will call our functions
1497 # Create a changenode group generator that will call our functions
1498 # back to lookup the owning changenode and collect information.
1498 # back to lookup the owning changenode and collect information.
1499 group = cl.group(msng_cl_lst, identity, collect)
1499 group = cl.group(msng_cl_lst, identity, collect)
1500 for cnt, chnk in enumerate(group):
1500 for cnt, chnk in enumerate(group):
1501 yield chnk
1501 yield chnk
1502 # revlog.group yields three entries per node, so
1502 # revlog.group yields three entries per node, so
1503 # dividing by 3 gives an approximation of how many
1503 # dividing by 3 gives an approximation of how many
1504 # nodes have been processed.
1504 # nodes have been processed.
1505 self.ui.progress(_('bundling'), cnt / 3,
1505 self.ui.progress(_('bundling'), cnt / 3,
1506 unit=_('changesets'))
1506 unit=_('changesets'))
1507 changecount = cnt / 3
1507 changecount = cnt / 3
1508 self.ui.progress(_('bundling'), None)
1508 self.ui.progress(_('bundling'), None)
1509
1509
1510 prune(mnfst, msng_mnfst_set)
1510 prune(mnfst, msng_mnfst_set)
1511 add_extra_nodes(1, msng_mnfst_set)
1511 add_extra_nodes(1, msng_mnfst_set)
1512 msng_mnfst_lst = msng_mnfst_set.keys()
1512 msng_mnfst_lst = msng_mnfst_set.keys()
1513 # Sort the manifestnodes by revision number.
1513 # Sort the manifestnodes by revision number.
1514 msng_mnfst_lst.sort(key=mnfst.rev)
1514 msng_mnfst_lst.sort(key=mnfst.rev)
1515 # Create a generator for the manifestnodes that calls our lookup
1515 # Create a generator for the manifestnodes that calls our lookup
1516 # and data collection functions back.
1516 # and data collection functions back.
1517 group = mnfst.group(msng_mnfst_lst,
1517 group = mnfst.group(msng_mnfst_lst,
1518 lambda mnode: msng_mnfst_set[mnode],
1518 lambda mnode: msng_mnfst_set[mnode],
1519 filenode_collector(changedfiles))
1519 filenode_collector(changedfiles))
1520 efiles = {}
1520 efiles = {}
1521 for cnt, chnk in enumerate(group):
1521 for cnt, chnk in enumerate(group):
1522 if cnt % 3 == 1:
1522 if cnt % 3 == 1:
1523 mnode = chnk[:20]
1523 mnode = chnk[:20]
1524 efiles.update(mnfst.readdelta(mnode))
1524 efiles.update(mnfst.readdelta(mnode))
1525 yield chnk
1525 yield chnk
1526 # see above comment for why we divide by 3
1526 # see above comment for why we divide by 3
1527 self.ui.progress(_('bundling'), cnt / 3,
1527 self.ui.progress(_('bundling'), cnt / 3,
1528 unit=_('manifests'), total=changecount)
1528 unit=_('manifests'), total=changecount)
1529 self.ui.progress(_('bundling'), None)
1529 self.ui.progress(_('bundling'), None)
1530 efiles = len(efiles)
1530 efiles = len(efiles)
1531
1531
1532 # These are no longer needed, dereference and toss the memory for
1532 # These are no longer needed, dereference and toss the memory for
1533 # them.
1533 # them.
1534 msng_mnfst_lst = None
1534 msng_mnfst_lst = None
1535 msng_mnfst_set.clear()
1535 msng_mnfst_set.clear()
1536
1536
1537 if extranodes:
1537 if extranodes:
1538 for fname in extranodes:
1538 for fname in extranodes:
1539 if isinstance(fname, int):
1539 if isinstance(fname, int):
1540 continue
1540 continue
1541 msng_filenode_set.setdefault(fname, {})
1541 msng_filenode_set.setdefault(fname, {})
1542 changedfiles.add(fname)
1542 changedfiles.add(fname)
1543 # Go through all our files in order sorted by name.
1543 # Go through all our files in order sorted by name.
1544 for idx, fname in enumerate(sorted(changedfiles)):
1544 for idx, fname in enumerate(sorted(changedfiles)):
1545 filerevlog = self.file(fname)
1545 filerevlog = self.file(fname)
1546 if not len(filerevlog):
1546 if not len(filerevlog):
1547 raise util.Abort(_("empty or missing revlog for %s") % fname)
1547 raise util.Abort(_("empty or missing revlog for %s") % fname)
1548 # Toss out the filenodes that the recipient isn't really
1548 # Toss out the filenodes that the recipient isn't really
1549 # missing.
1549 # missing.
1550 missingfnodes = msng_filenode_set.pop(fname, {})
1550 missingfnodes = msng_filenode_set.pop(fname, {})
1551 prune(filerevlog, missingfnodes)
1551 prune(filerevlog, missingfnodes)
1552 add_extra_nodes(fname, missingfnodes)
1552 add_extra_nodes(fname, missingfnodes)
1553 # If any filenodes are left, generate the group for them,
1553 # If any filenodes are left, generate the group for them,
1554 # otherwise don't bother.
1554 # otherwise don't bother.
1555 if missingfnodes:
1555 if missingfnodes:
1556 yield changegroup.chunkheader(len(fname))
1556 yield changegroup.chunkheader(len(fname))
1557 yield fname
1557 yield fname
1558 # Sort the filenodes by their revision # (topological order)
1558 # Sort the filenodes by their revision # (topological order)
1559 nodeiter = list(missingfnodes)
1559 nodeiter = list(missingfnodes)
1560 nodeiter.sort(key=filerevlog.rev)
1560 nodeiter.sort(key=filerevlog.rev)
1561 # Create a group generator and only pass in a changenode
1561 # Create a group generator and only pass in a changenode
1562 # lookup function as we need to collect no information
1562 # lookup function as we need to collect no information
1563 # from filenodes.
1563 # from filenodes.
1564 group = filerevlog.group(nodeiter,
1564 group = filerevlog.group(nodeiter,
1565 lambda fnode: missingfnodes[fnode])
1565 lambda fnode: missingfnodes[fnode])
1566 for chnk in group:
1566 for chnk in group:
1567 # even though we print the same progress on
1567 # even though we print the same progress on
1568 # most loop iterations, put the progress call
1568 # most loop iterations, put the progress call
1569 # here so that time estimates (if any) can be updated
1569 # here so that time estimates (if any) can be updated
1570 self.ui.progress(
1570 self.ui.progress(
1571 _('bundling'), idx, item=fname,
1571 _('bundling'), idx, item=fname,
1572 unit=_('files'), total=efiles)
1572 unit=_('files'), total=efiles)
1573 yield chnk
1573 yield chnk
1574 # Signal that no more groups are left.
1574 # Signal that no more groups are left.
1575 yield changegroup.closechunk()
1575 yield changegroup.closechunk()
1576 self.ui.progress(_('bundling'), None)
1576 self.ui.progress(_('bundling'), None)
1577
1577
1578 if msng_cl_lst:
1578 if msng_cl_lst:
1579 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1579 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1580
1580
1581 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1581 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1582
1582
1583 def changegroup(self, basenodes, source):
1583 def changegroup(self, basenodes, source):
1584 # to avoid a race we use changegroupsubset() (issue1320)
1584 # to avoid a race we use changegroupsubset() (issue1320)
1585 return self.changegroupsubset(basenodes, self.heads(), source)
1585 return self.changegroupsubset(basenodes, self.heads(), source)
1586
1586
1587 def _changegroup(self, nodes, source):
1587 def _changegroup(self, nodes, source):
1588 """Compute the changegroup of all nodes that we have that a recipient
1588 """Compute the changegroup of all nodes that we have that a recipient
1589 doesn't. Return a chunkbuffer object whose read() method will return
1589 doesn't. Return a chunkbuffer object whose read() method will return
1590 successive changegroup chunks.
1590 successive changegroup chunks.
1591
1591
1592 This is much easier than the previous function as we can assume that
1592 This is much easier than the previous function as we can assume that
1593 the recipient has any changenode we aren't sending them.
1593 the recipient has any changenode we aren't sending them.
1594
1594
1595 nodes is the set of nodes to send"""
1595 nodes is the set of nodes to send"""
1596
1596
1597 self.hook('preoutgoing', throw=True, source=source)
1597 self.hook('preoutgoing', throw=True, source=source)
1598
1598
1599 cl = self.changelog
1599 cl = self.changelog
1600 revset = set([cl.rev(n) for n in nodes])
1600 revset = set([cl.rev(n) for n in nodes])
1601 self.changegroupinfo(nodes, source)
1601 self.changegroupinfo(nodes, source)
1602
1602
1603 def identity(x):
1603 def identity(x):
1604 return x
1604 return x
1605
1605
1606 def gennodelst(log):
1606 def gennodelst(log):
1607 for r in log:
1607 for r in log:
1608 if log.linkrev(r) in revset:
1608 if log.linkrev(r) in revset:
1609 yield log.node(r)
1609 yield log.node(r)
1610
1610
1611 def lookuplinkrev_func(revlog):
1611 def lookuplinkrev_func(revlog):
1612 def lookuplinkrev(n):
1612 def lookuplinkrev(n):
1613 return cl.node(revlog.linkrev(revlog.rev(n)))
1613 return cl.node(revlog.linkrev(revlog.rev(n)))
1614 return lookuplinkrev
1614 return lookuplinkrev
1615
1615
1616 def gengroup():
1616 def gengroup():
1617 '''yield a sequence of changegroup chunks (strings)'''
1617 '''yield a sequence of changegroup chunks (strings)'''
1618 # construct a list of all changed files
1618 # construct a list of all changed files
1619 changedfiles = set()
1619 changedfiles = set()
1620 mmfs = {}
1620 mmfs = {}
1621 collect = changegroup.collector(cl, mmfs, changedfiles)
1621 collect = changegroup.collector(cl, mmfs, changedfiles)
1622
1622
1623 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1623 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1624 # revlog.group yields three entries per node, so
1624 # revlog.group yields three entries per node, so
1625 # dividing by 3 gives an approximation of how many
1625 # dividing by 3 gives an approximation of how many
1626 # nodes have been processed.
1626 # nodes have been processed.
1627 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1627 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1628 yield chnk
1628 yield chnk
1629 changecount = cnt / 3
1629 changecount = cnt / 3
1630 self.ui.progress(_('bundling'), None)
1630 self.ui.progress(_('bundling'), None)
1631
1631
1632 mnfst = self.manifest
1632 mnfst = self.manifest
1633 nodeiter = gennodelst(mnfst)
1633 nodeiter = gennodelst(mnfst)
1634 efiles = {}
1634 efiles = {}
1635 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1635 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1636 lookuplinkrev_func(mnfst))):
1636 lookuplinkrev_func(mnfst))):
1637 if cnt % 3 == 1:
1637 if cnt % 3 == 1:
1638 mnode = chnk[:20]
1638 mnode = chnk[:20]
1639 efiles.update(mnfst.readdelta(mnode))
1639 efiles.update(mnfst.readdelta(mnode))
1640 # see above comment for why we divide by 3
1640 # see above comment for why we divide by 3
1641 self.ui.progress(_('bundling'), cnt / 3,
1641 self.ui.progress(_('bundling'), cnt / 3,
1642 unit=_('manifests'), total=changecount)
1642 unit=_('manifests'), total=changecount)
1643 yield chnk
1643 yield chnk
1644 efiles = len(efiles)
1644 efiles = len(efiles)
1645 self.ui.progress(_('bundling'), None)
1645 self.ui.progress(_('bundling'), None)
1646
1646
1647 for idx, fname in enumerate(sorted(changedfiles)):
1647 for idx, fname in enumerate(sorted(changedfiles)):
1648 filerevlog = self.file(fname)
1648 filerevlog = self.file(fname)
1649 if not len(filerevlog):
1649 if not len(filerevlog):
1650 raise util.Abort(_("empty or missing revlog for %s") % fname)
1650 raise util.Abort(_("empty or missing revlog for %s") % fname)
1651 nodeiter = gennodelst(filerevlog)
1651 nodeiter = gennodelst(filerevlog)
1652 nodeiter = list(nodeiter)
1652 nodeiter = list(nodeiter)
1653 if nodeiter:
1653 if nodeiter:
1654 yield changegroup.chunkheader(len(fname))
1654 yield changegroup.chunkheader(len(fname))
1655 yield fname
1655 yield fname
1656 lookup = lookuplinkrev_func(filerevlog)
1656 lookup = lookuplinkrev_func(filerevlog)
1657 for chnk in filerevlog.group(nodeiter, lookup):
1657 for chnk in filerevlog.group(nodeiter, lookup):
1658 self.ui.progress(
1658 self.ui.progress(
1659 _('bundling'), idx, item=fname,
1659 _('bundling'), idx, item=fname,
1660 total=efiles, unit=_('files'))
1660 total=efiles, unit=_('files'))
1661 yield chnk
1661 yield chnk
1662 self.ui.progress(_('bundling'), None)
1662 self.ui.progress(_('bundling'), None)
1663
1663
1664 yield changegroup.closechunk()
1664 yield changegroup.closechunk()
1665
1665
1666 if nodes:
1666 if nodes:
1667 self.hook('outgoing', node=hex(nodes[0]), source=source)
1667 self.hook('outgoing', node=hex(nodes[0]), source=source)
1668
1668
1669 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1669 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1670
1670
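Both changegroup builders above return an object whose read() method yields the raw changegroup stream; a hedged sketch of spooling that stream to a file follows (the path and chunk size are arbitrary):

    from mercurial.node import nullid
    cg = repo.changegroup([nullid], 'bundle')   # everything in the repo
    out = open('/tmp/all.cg', 'wb')
    try:
        while True:
            data = cg.read(4096)
            if not data:
                break
            out.write(data)
    finally:
        out.close()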
1671 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1671 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1672 """Add the changegroup returned by source.read() to this repo.
1672 """Add the changegroup returned by source.read() to this repo.
1673 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1673 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1674 the URL of the repo where this changegroup is coming from.
1674 the URL of the repo where this changegroup is coming from.
1675 If lock is not None, the function takes ownership of the lock
1676 and releases it after the changegroup is added.
1675
1677
1676 Return an integer summarizing the change to this repo:
1678 Return an integer summarizing the change to this repo:
1677 - nothing changed or no source: 0
1679 - nothing changed or no source: 0
1678 - more heads than before: 1+added heads (2..n)
1680 - more heads than before: 1+added heads (2..n)
1679 - fewer heads than before: -1-removed heads (-2..-n)
1681 - fewer heads than before: -1-removed heads (-2..-n)
1680 - number of heads stays the same: 1
1682 - number of heads stays the same: 1
1681 """
1683 """
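        # Editor's note, not in the original source: the lock handoff
        # described above is what pull() relies on earlier in this file; it
        # acquires self.lock() and passes it through as
        #   self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        # so the changegroup can be applied and the lock released from
        # inside this method.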
1682 def csmap(x):
1684 def csmap(x):
1683 self.ui.debug("add changeset %s\n" % short(x))
1685 self.ui.debug("add changeset %s\n" % short(x))
1684 return len(cl)
1686 return len(cl)
1685
1687
1686 def revmap(x):
1688 def revmap(x):
1687 return cl.rev(x)
1689 return cl.rev(x)
1688
1690
1689 if not source:
1691 if not source:
1690 return 0
1692 return 0
1691
1693
1692 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1694 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1693
1695
1694 changesets = files = revisions = 0
        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)
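
            # The manifest cross-check above is optional and off by default
            # (configbool('server', 'validate', default=False)). A hedged
            # illustration of how a server operator would presumably enable
            # it in the repository's hgrc (an illustrative snippet, not part
            # of this file):
            #
            #   [server]
            #   validate = True
            #
            # With it enabled, every incoming changeset's manifest delta is
            # recorded in needfiles so the file loop below can verify that
            # each advertised filenode actually arrives.
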
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)
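
            # A note on the pending mechanism, hedged because the hook
            # plumbing lives outside this file: 'pretxnchangegroup' fires
            # before the transaction commits, and the 'pending' callable
            # flushes the delayed changelog to temporary pending files and
            # returns the repo root. The hook machinery is then expected to
            # advertise that root (conventionally via the HG_PENDING
            # environment variable) so external hooks can inspect the
            # incoming changesets and veto them; a failing hook raises here
            # (throw=True) and aborts the whole transaction.
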
            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

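Note that addchangegroup never returns 0: a result of 1 means changesets (if any) were added without creating extra heads, values above 1 mean extra heads appeared, and negative values mean the head count actually went down. The helper below is purely illustrative and not part of Mercurial; it only shows how a caller could decode that convention.

def describe_modheads(modheads):
    # Hypothetical helper: interpret the non-zero value addchangegroup returns.
    if modheads < 0:
        # newheads - oldheads - 1: the incoming changesets reduced the heads
        return "%d head(s) merged away" % (-modheads - 1)
    elif modheads == 1:
        # newheads == oldheads: nothing added, or no extra head created
        return "no new heads"
    else:
        # newheads - oldheads + 1: extra heads, a merge may be needed
        return "%d new head(s)" % (modheads - 1)
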
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

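The parsing loop in stream_in implies a simple framing for the byte stream produced by the remote's stream_out: a status line, a totals line, then a header plus raw bytes per file. The generator below only sketches that framing for illustration; it is not Mercurial's actual server-side code, which lives elsewhere.

def fake_stream(entries):
    """Illustrative producer of the stream stream_in expects.

    entries: list of (store_path, data_bytes) pairs.
    Framing: "0\n" status (0=ok, 1=forbidden, 2=lock failed),
    "<total_files> <total_bytes>\n", then per file
    "<store path>\0<size>\n" followed by exactly <size> raw bytes.
    """
    total_bytes = sum(len(data) for _, data in entries)
    yield "0\n"
    yield "%d %d\n" % (len(entries), total_bytes)
    for name, data in entries:
        yield "%s\0%d\n" % (name, len(data))
        yield data
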
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

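The streamreqs branch above is plain set arithmetic: the clone may stream only if every format requirement the server advertises is one this client supports, otherwise it falls back to a regular pull. A hedged illustration with a made-up capability value follows; the advertised string is an example, not taken from a real server.

# Suppose the server advertises: streamreqs=revlogv1,parentdelta
streamreqs = set("revlogv1,parentdelta".split(','))
supportedformats = set(('revlogv1', 'parentdelta'))
can_stream = not (streamreqs - supportedformats)  # True: nothing unsupported
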
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

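pushkey and listkeys are thin wrappers around the mercurial.pushkey module, which dispatches on the namespace string. The sketch below assumes a 'bookmarks' namespace is registered (a common use of this mechanism, but an assumption here) and that values are hex node strings; it is illustrative, not part of this module.

def sync_bookmark(repo, name, newnode):
    # Hedged sketch: move one bookmark through the generic pushkey protocol.
    current = repo.listkeys('bookmarks')   # e.g. {'featureX': '0123abcd...'}
    old = current.get(name, '')            # empty string when creating
    return repo.pushkey('bookmarks', name, old, newnode)  # truthy on success
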
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

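aftertrans deliberately captures only a list of (src, dest) name pairs rather than the repository object, so the closure it returns cannot keep the repo alive through a reference cycle. A hedged usage sketch follows; the file names are illustrative, patterned on Mercurial's journal/undo convention, and the call site is not shown in this file.

after = aftertrans([('journal', 'undo'),
                    ('journal.dirstate', 'undo.dirstate')])
# ... the transaction commits successfully ...
after()   # renames journal -> undo and journal.dirstate -> undo.dirstate
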
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
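
instance and islocal form the small module-level interface a repository front-end uses once it has classified a path as local. The sketch below shows the expected calling pattern only; the helper name is invented, and ui is assumed to be a mercurial.ui.ui instance.

def open_local(ui, path):
    # Hedged sketch: open an existing local repository through this module's
    # entry points. islocal() always answers True for this backend.
    assert islocal(path)
    return instance(ui, path, create=False)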