branchcache: improve speed relative to the amount of heads...
Author: Dan Villiom Podlaski Christiansen
Changeset: r14056:bcfe78c3 (branch: default)
File: mercurial/localrepo.py
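
This changeset speeds up localrepository._updatebranchcache() for branches
with many heads: the per-branch head list is sorted by revision once, so the
oldest head is always bheads[0] instead of being recomputed with min() on
every pass of the inner loop, and the head list is only rebuilt when the
reachability scan actually pruned something. In the listing below, removed
lines are marked "-" and added lines "+".
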
@@ -1,1940 +1,1942 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.path_auditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

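To illustrate the prefix walk above with hypothetical names: for a path like
"sub/deep/x" where "sub" is a subrepository, the loop tries "sub/deep/x",
then "sub/deep", and finally "sub"; that last prefix is in ctx.substate, so
the check is delegated as ctx.sub('sub').checknested('deep/x').
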
    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local' : a local tag
        'global' : a global tag
        None : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

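A note on head ordering: with this patch the per-branch head lists are kept
sorted from oldest to newest revision, so heads[-1] above is the newest head;
branchtags() then walks the list backwards and picks the first head whose
changeset extra field (self.changelog.read(h)[5]) lacks the 'close' marker,
falling back to the newest head when every head on the branch is closed.
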
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

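The cache file round-tripped by these two methods is plain text: the first
line is "<tip-hex> <tip-rev>", followed by one "<node-hex> <branch>" line per
head. A sketch with made-up values:

    1f0dee641bb7258c56bd60e93edfa2405381c41e 42
    a0b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9 default
    0123456789abcdef0123456789abcdef01234567 stable

_readbranchcache() throws the whole cache away when the recorded tip no
longer matches the repository, e.g. after a strip or rollback.
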
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
+            bheads = sorted(bheads, key=lambda x: self[x].rev())
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
-                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
+                minbhrev = self[bheads[0]].node()
                 reachable = self.changelog.reachable(latest, minbhrev)
                 reachable.remove(latest)
-                bheads = [b for b in bheads if b not in reachable]
+                if reachable:
+                    bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads

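This hunk is the heart of the patch. The old code recomputed the oldest
branch head from scratch on every pass of the while loop, making each
iteration O(len(bheads)); sorting once up front makes that lookup O(1):

    # before: min() over every head, inside the loop
    minbhrev = self[min([self[bh].rev() for bh in bheads])].node()

    # after: sort once by revision before the loop; the filter
    # "[b for b in bheads if b not in reachable]" preserves order,
    # so bheads[0] remains the oldest head after pruning
    bheads = sorted(bheads, key=lambda x: self[x].rev())
    minbhrev = self[bheads[0]].node()

The new "if reachable:" guard also skips rebuilding the head list on
iterations where nothing besides the popped node was reachable.
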
    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

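The lookup order above is worth noting: an integer revision, the special
names '.', 'null' and 'tip', an exact changelog match, bookmarks, tags,
branch names, and finally a partial node prefix. A bookmark that happens to
share its name with a tag therefore shadows the tag here.
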
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

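The tables loaded above come from hgrc sections named after the filter
argument ('encode' or 'decode'); each entry maps a file pattern to either a
shell command or a registered data filter, and a value of '!' disables the
entry. A hypothetical hgrc sketch:

    [encode]
    # pipe text files through a command when read from the working dir
    **.txt = dos2unix

    [decode]
    # and back when written out
    **.txt = unix2dos
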
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

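_lock() first tries a non-blocking acquisition and only falls back to the
ui.timeout wait when the lock is held elsewhere. The usual calling pattern,
as recover() and rollback() above show, is:

    l = repo.lock()        # or repo.wlock() for .hg outside the store
    try:
        pass               # ... modify the repository ...
    finally:
        l.release()
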
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

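For a rename, the copy metadata recorded above travels with the new file
revision; sketched with hypothetical names, committing 'bar' as a copy of
'foo' stores

    meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: foo's filenode

and sets fparent1 to nullid, which tells later readers to look up the real
parent through the copy source.
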
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

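    # Illustrative usage (not part of the original file): a minimal, hedged
    # sketch of driving commit() from an embedding script, assuming `repo`
    # was obtained via hg.repository(ui, path). commit() returns the new
    # changeset node, or None when there is nothing to commit:
    #
    #     node = repo.commit(text='fix parser bug',
    #                        user='Jane Doe <jane@example.com>')
    #     if node is None:
    #         ui.status('nothing changed\n')
    #     else:
    #         ui.status('committed %s\n' % hex(node))
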
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

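    # Illustrative note (not part of the original file): the `pending`
    # callable handed to the pretxncommit hook above is what lets hooks see
    # the not-yet-finalized changeset. It returns the repo root only when
    # delayed changelog data was actually written out, which signals the
    # hook machinery to read the pending changelog instead of the committed
    # one; a hook can thus inspect hex(n) before the transaction closes.
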
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

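    # Illustrative usage (not part of the original file): walk() defers to a
    # context object, so the same call covers the working directory
    # (node=None) and committed revisions. A hedged sketch using a pattern
    # matcher:
    #
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):              # working directory
    #         ui.write(f + '\n')
    #     for f in repo.walk(m, node='tip'):  # a specific changeset
    #         ui.write(f + '\n')
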
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

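    # Illustrative usage (not part of the original file): status() always
    # returns a 7-tuple of sorted file lists; the ignored, clean and unknown
    # lists are only populated when explicitly requested. A hedged sketch:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(clean=True)
    #     for f in modified:
    #         ui.write('M %s\n' % f)
    #     for f in clean:
    #         ui.write('C %s\n' % f)
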
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

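    # Illustrative usage (not part of the original file): a hedged sketch of
    # listing the open heads of the 'default' branch, newest first. The
    # returned values are binary nodes, so hex() them for display:
    #
    #     for h in repo.branchheads('default'):
    #         ui.write('%s\n' % hex(h))
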
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

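    # Illustrative note (not part of the original file): for each (top,
    # bottom) pair, between() walks first parents down from top and samples
    # the nodes at exponentially growing distances 1, 2, 4, 8, ... from top.
    # For a linear history with top=r10 and bottom=r0 it would collect
    # roughly r9, r8, r6, r2: a logarithmic-size summary of the range, used
    # by the legacy discovery protocol to narrow down missing revisions.
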
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            usecommon = remote.capable('getbundle')
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force, commononly=usecommon)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if usecommon:
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

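    # Illustrative usage (not part of the original file): a hedged sketch of
    # pulling everything from another repository, assuming `other` came
    # from hg.repository(ui, url):
    #
    #     result = repo.pull(other)
    #     if result == 0:
    #         ui.status('no changes found\n')
    #
    # The return value is whatever addchangegroup() reported (see below),
    # or 0 when discovery found nothing to fetch.
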
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

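    # Illustrative note (not part of the original file): a hedged sketch of
    # how an extension might hook this, using the method-replacement idiom
    # of the era in reposetup(); the 'acme'/'frozen' config knob is purely
    # hypothetical:
    #
    #     def reposetup(ui, repo):
    #         orig = repo.checkpush
    #         def checkpush(force, revs):
    #             if not force and repo.ui.configbool('acme', 'frozen'):
    #                 raise util.Abort('pushing is currently frozen')
    #             return orig(force, revs)
    #         repo.checkpush = checkpush
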
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - 0 means HTTP error *or* nothing to push
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

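    # Illustrative usage (not part of the original file): a hedged sketch of
    # interpreting push()'s return value, assuming `other` came from
    # hg.repository(ui, url):
    #
    #     ret = repo.push(other)
    #     if ret == 0:
    #         ui.warn('push failed or nothing to push\n')
    #     elif ret > 1:
    #         ui.status('remote gained %d heads\n' % (ret - 1))
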
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        return self._changegroupsubset(common, missing, heads, source)

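    # Illustrative note (not part of the original file): in set terms,
    # getbundle() sends ancestors(heads) - ancestors(common). With
    # common=[nullid] and heads=None this degenerates to a full-clone
    # bundle, since every node is an ancestor of some local head and
    # nullid has no ancestors.
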
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

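    # Illustrative note (not part of the original file): the stream built
    # above is, roughly, the bundle10 wire layout: one changelog delta
    # group, then one manifest group, then, for each touched file, a file
    # header followed by that file's delta group, with an empty chunk
    # closing each group and the stream as a whole.
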
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1661 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1663 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1662 """Add the changegroup returned by source.read() to this repo.
1664 """Add the changegroup returned by source.read() to this repo.
1663 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1665 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1664 the URL of the repo where this changegroup is coming from.
1666 the URL of the repo where this changegroup is coming from.
1665 If lock is not None, the function takes ownership of the lock
1667 If lock is not None, the function takes ownership of the lock
1666 and releases it after the changegroup is added.
1668 and releases it after the changegroup is added.
1667
1669
1668 Return an integer summarizing the change to this repo:
1670 Return an integer summarizing the change to this repo:
1669 - nothing changed or no source: 0
1671 - nothing changed or no source: 0
1670 - more heads than before: 1+added heads (2..n)
1672 - more heads than before: 1+added heads (2..n)
1671 - fewer heads than before: -1-removed heads (-2..-n)
1673 - fewer heads than before: -1-removed heads (-2..-n)
1672 - number of heads stays the same: 1
1674 - number of heads stays the same: 1
1673 """
1675 """
1674 def csmap(x):
1676 def csmap(x):
1675 self.ui.debug("add changeset %s\n" % short(x))
1677 self.ui.debug("add changeset %s\n" % short(x))
1676 return len(cl)
1678 return len(cl)
1677
1679
1678 def revmap(x):
1680 def revmap(x):
1679 return cl.rev(x)
1681 return cl.rev(x)
1680
1682
1681 if not source:
1683 if not source:
1682 return 0
1684 return 0
1683
1685
1684 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1686 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1685
1687
1686 changesets = files = revisions = 0
1688 changesets = files = revisions = 0
1687 efiles = set()
1689 efiles = set()
1688
1690
1689 # write changelog data to temp files so concurrent readers will not see
1691 # write changelog data to temp files so concurrent readers will not see
1690 # inconsistent view
1692 # inconsistent view
1691 cl = self.changelog
1693 cl = self.changelog
1692 cl.delayupdate()
1694 cl.delayupdate()
1693 oldheads = cl.heads()
1695 oldheads = cl.heads()
1694
1696
1695 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1697 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1696 try:
1698 try:
1697 trp = weakref.proxy(tr)
1699 trp = weakref.proxy(tr)
1698 # pull off the changeset group
1700 # pull off the changeset group
1699 self.ui.status(_("adding changesets\n"))
1701 self.ui.status(_("adding changesets\n"))
1700 clstart = len(cl)
1702 clstart = len(cl)
1701 class prog(object):
1703 class prog(object):
1702 step = _('changesets')
1704 step = _('changesets')
1703 count = 1
1705 count = 1
1704 ui = self.ui
1706 ui = self.ui
1705 total = None
1707 total = None
1706 def __call__(self):
1708 def __call__(self):
1707 self.ui.progress(self.step, self.count, unit=_('chunks'),
1709 self.ui.progress(self.step, self.count, unit=_('chunks'),
1708 total=self.total)
1710 total=self.total)
1709 self.count += 1
1711 self.count += 1
1710 pr = prog()
1712 pr = prog()
1711 source.callback = pr
1713 source.callback = pr
1712
1714
1713 if (cl.addgroup(source, csmap, trp) is None
1715 if (cl.addgroup(source, csmap, trp) is None
1714 and not emptyok):
1716 and not emptyok):
1715 raise util.Abort(_("received changelog group is empty"))
1717 raise util.Abort(_("received changelog group is empty"))
1716 clend = len(cl)
1718 clend = len(cl)
1717 changesets = clend - clstart
1719 changesets = clend - clstart
1718 for c in xrange(clstart, clend):
1720 for c in xrange(clstart, clend):
1719 efiles.update(self[c].files())
1721 efiles.update(self[c].files())
1720 efiles = len(efiles)
1722 efiles = len(efiles)
1721 self.ui.progress(_('changesets'), None)
1723 self.ui.progress(_('changesets'), None)
1722
1724
1723 # pull off the manifest group
1725 # pull off the manifest group
1724 self.ui.status(_("adding manifests\n"))
1726 self.ui.status(_("adding manifests\n"))
1725 pr.step = _('manifests')
1727 pr.step = _('manifests')
1726 pr.count = 1
1728 pr.count = 1
1727 pr.total = changesets # manifests <= changesets
1729 pr.total = changesets # manifests <= changesets
1728 # no need to check for empty manifest group here:
1730 # no need to check for empty manifest group here:
1729 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1731 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1730 # no new manifest will be created and the manifest group will
1732 # no new manifest will be created and the manifest group will
1731 # be empty during the pull
1733 # be empty during the pull
1732 self.manifest.addgroup(source, revmap, trp)
1734 self.manifest.addgroup(source, revmap, trp)
1733 self.ui.progress(_('manifests'), None)
1735 self.ui.progress(_('manifests'), None)
1734
1736
1735 needfiles = {}
1737 needfiles = {}
1736 if self.ui.configbool('server', 'validate', default=False):
1738 if self.ui.configbool('server', 'validate', default=False):
1737 # validate incoming csets have their manifests
1739 # validate incoming csets have their manifests
1738 for cset in xrange(clstart, clend):
1740 for cset in xrange(clstart, clend):
1739 mfest = self.changelog.read(self.changelog.node(cset))[0]
1741 mfest = self.changelog.read(self.changelog.node(cset))[0]
1740 mfest = self.manifest.readdelta(mfest)
1742 mfest = self.manifest.readdelta(mfest)
1741 # store file nodes we must see
1743 # store file nodes we must see
1742 for f, n in mfest.iteritems():
1744 for f, n in mfest.iteritems():
1743 needfiles.setdefault(f, set()).add(n)
1745 needfiles.setdefault(f, set()).add(n)
1744
1746
1745 # process the files
1747 # process the files
1746 self.ui.status(_("adding file changes\n"))
1748 self.ui.status(_("adding file changes\n"))
1747 pr.step = 'files'
1749 pr.step = 'files'
1748 pr.count = 1
1750 pr.count = 1
1749 pr.total = efiles
1751 pr.total = efiles
1750 source.callback = None
1752 source.callback = None
1751
1753
1752 while 1:
1754 while 1:
1753 f = source.chunk()
1755 f = source.chunk()
1754 if not f:
1756 if not f:
1755 break
1757 break
1756 self.ui.debug("adding %s revisions\n" % f)
1758 self.ui.debug("adding %s revisions\n" % f)
1757 pr()
1759 pr()
1758 fl = self.file(f)
1760 fl = self.file(f)
1759 o = len(fl)
1761 o = len(fl)
1760 if fl.addgroup(source, revmap, trp) is None:
1762 if fl.addgroup(source, revmap, trp) is None:
1761 raise util.Abort(_("received file revlog group is empty"))
1763 raise util.Abort(_("received file revlog group is empty"))
1762 revisions += len(fl) - o
1764 revisions += len(fl) - o
1763 files += 1
1765 files += 1
1764 if f in needfiles:
1766 if f in needfiles:
1765 needs = needfiles[f]
1767 needs = needfiles[f]
1766 for new in xrange(o, len(fl)):
1768 for new in xrange(o, len(fl)):
1767 n = fl.node(new)
1769 n = fl.node(new)
1768 if n in needs:
1770 if n in needs:
1769 needs.remove(n)
1771 needs.remove(n)
1770 if not needs:
1772 if not needs:
1771 del needfiles[f]
1773 del needfiles[f]
1772 self.ui.progress(_('files'), None)
1774 self.ui.progress(_('files'), None)
1773
1775
1774 for f, needs in needfiles.iteritems():
1776 for f, needs in needfiles.iteritems():
1775 fl = self.file(f)
1777 fl = self.file(f)
1776 for n in needs:
1778 for n in needs:
1777 try:
1779 try:
1778 fl.rev(n)
1780 fl.rev(n)
1779 except error.LookupError:
1781 except error.LookupError:
1780 raise util.Abort(
1782 raise util.Abort(
1781 _('missing file data for %s:%s - run hg verify') %
1783 _('missing file data for %s:%s - run hg verify') %
1782 (f, hex(n)))
1784 (f, hex(n)))
1783
1785
1784 dh = 0
1786 dh = 0
1785 if oldheads:
1787 if oldheads:
1786 heads = cl.heads()
1788 heads = cl.heads()
1787 dh = len(heads) - len(oldheads)
1789 dh = len(heads) - len(oldheads)
1788 for h in heads:
1790 for h in heads:
1789 if h not in oldheads and 'close' in self[h].extra():
1791 if h not in oldheads and 'close' in self[h].extra():
1790 dh -= 1
1792 dh -= 1
1791 htext = ""
1793 htext = ""
1792 if dh:
1794 if dh:
1793 htext = _(" (%+d heads)") % dh
1795 htext = _(" (%+d heads)") % dh
1794
1796
            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

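    # The return value above is deliberately never 0, so callers can treat it
    # both as a truthy "something was applied" flag and as an encoding of the
    # head delta. A sketch of the inverse mapping (hypothetical helper, not
    # part of this class):
    #
    #     def headdelta(ret):
    #         # undo the +/-1 offset applied by the method above
    #         return ret + 1 if ret < 0 else ret - 1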
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

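    # Shape of the stream consumed above, as implied by the parsing code
    # (field names are illustrative only):
    #
    #   <status>\n                       0 = ok, 1 = forbidden,
    #                                    2 = remote lock failed
    #   <total_files> <total_bytes>\n    overall transfer size
    #   then, for each file:
    #   <name>\0<size>\n                 store path and its byte count
    #   <size bytes of raw revlog data>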
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

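    # The 'streamreqs' check above, illustrated with hypothetical values: a
    # server advertising streamreqs=revlogv1 yields
    # set(['revlogv1']) - self.supportedformats == set(), so the clone is
    # streamed; any requirement the client does not recognize leaves the
    # difference non-empty and falls back to a regular pull.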
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

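    # Usage sketch (key and values illustrative): bookmarks are one namespace
    # that travels over this interface:
    #
    #   repo.listkeys('bookmarks')    # -> {'mybook': '<40-char hex node>'}
    #   repo.pushkey('bookmarks', 'mybook', oldhex, newhex)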
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

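# Illustration (paths hypothetical): the returned closure captures only
# plain tuples, never the repository or transaction objects, so storing it
# as a transaction's post-close callback cannot create a reference cycle:
#
#   move = aftertrans([('journal', 'undo')])
#   ...                 # transaction commits
#   move()              # renames journal -> undo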
def instance(ui, path, create):
    return localrepository(ui, urlmod.localpath(path), create)

def islocal(path):
    return True
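# 'instance' and 'islocal' are the module-level entry points that hg's
# repository scheme dispatch expects from every backend module; for plain
# local paths they simply hand off to localrepository.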