changegroup: add first logic to send file header...
Matt Mackall
r13809:e6f79549 default
@@ -1,1944 +1,1947 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                          _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

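    # Propagate format requirements into the store opener's options:
    # currently this just enables parentdelta handling when the
    # repository carries the 'parentdelta' requirement.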
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

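    # Persist self.requirements to .hg/requires, one entry per line, so
    # clients that do not understand a requirement refuse to touch the
    # repository rather than corrupt it.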
    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

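    # Bring the branch cache in 'partial' up to date from lrev + 1 through
    # tip, writing the refreshed cache back to disk, and return it.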
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

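    # Refresh the in-memory branch cache: reuse it when the cached tip is
    # still current, otherwise start from the on-disk cache (or the last
    # cached revision) and update from there.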
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

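    # The on-disk cache format is one "tipnode tiprev" line followed by
    # "node branchlabel" lines, one per branch head (see
    # _writebranchcache). Any parse problem simply invalidates the cache.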
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

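    # Fold the changectxs from ctxgen into the branch -> heads mapping in
    # 'partial', dropping old heads that became reachable from new ones.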
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

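    # Resolve a user-supplied key to a binary node. Tries, in order:
    # integer revision, the '.', 'null' and 'tip' aliases, an exact
    # changelog match, bookmarks, tags, branch names, and finally a
    # unique node prefix.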
    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

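    # For each node in 'nodes', report whether the local changelog knows
    # it; this backs the 'known' wire-protocol capability used during
    # discovery.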
    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

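    # Compile the [encode]/[decode] filter configuration into a list of
    # (matcher, filterfn, params) triples, cached per filter name.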
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

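    # Read a file from the working directory, running it through the
    # configured encode filters; a symlink yields its target path.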
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

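    # Write data to the working directory after applying the decode
    # filters, honoring the symlink ('l') and exec ('x') flags.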
    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

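    # Open a new transaction, or nest into one that is already running.
    # The dirstate, branch and description are journaled first so an
    # interrupted transaction can be recovered; on close the journal.*
    # files are renamed to undo.* for 'hg rollback'.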
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

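    # Drop the cached changelog, manifest and bookmark attributes in
    # addition to the lighter caches, forcing a reload from disk.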
    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

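    # Acquire 'lockname', retrying for up to ui.timeout seconds (default
    # 600) when another process holds it and wait is set.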
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
968 if mf.startswith(d):
969 break
969 break
970 else:
970 else:
971 fail(f, _("no match under directory!"))
971 fail(f, _("no match under directory!"))
972 elif f not in self.dirstate:
972 elif f not in self.dirstate:
973 fail(f, _("file not tracked!"))
973 fail(f, _("file not tracked!"))
974
974
975 if (not force and not extra.get("close") and not merge
975 if (not force and not extra.get("close") and not merge
976 and not (changes[0] or changes[1] or changes[2])
976 and not (changes[0] or changes[1] or changes[2])
977 and wctx.branch() == wctx.p1().branch()):
977 and wctx.branch() == wctx.p1().branch()):
978 return None
978 return None
979
979
980 ms = mergemod.mergestate(self)
980 ms = mergemod.mergestate(self)
981 for f in changes[0]:
981 for f in changes[0]:
982 if f in ms and ms[f] == 'u':
982 if f in ms and ms[f] == 'u':
983 raise util.Abort(_("unresolved merge conflicts "
983 raise util.Abort(_("unresolved merge conflicts "
984 "(see hg help resolve)"))
984 "(see hg help resolve)"))
985
985
986 cctx = context.workingctx(self, text, user, date, extra, changes)
986 cctx = context.workingctx(self, text, user, date, extra, changes)
987 if editor:
987 if editor:
988 cctx._text = editor(self, cctx, subs)
988 cctx._text = editor(self, cctx, subs)
989 edited = (text != cctx._text)
989 edited = (text != cctx._text)
990
990
991 # commit subs
991 # commit subs
992 if subs or removedsubs:
992 if subs or removedsubs:
993 state = wctx.substate.copy()
993 state = wctx.substate.copy()
994 for s in sorted(subs):
994 for s in sorted(subs):
995 sub = wctx.sub(s)
995 sub = wctx.sub(s)
996 self.ui.status(_('committing subrepository %s\n') %
996 self.ui.status(_('committing subrepository %s\n') %
997 subrepo.subrelpath(sub))
997 subrepo.subrelpath(sub))
998 sr = sub.commit(cctx._text, user, date)
998 sr = sub.commit(cctx._text, user, date)
999 state[s] = (state[s][0], sr)
999 state[s] = (state[s][0], sr)
1000 subrepo.writestate(self, state)
1000 subrepo.writestate(self, state)
1001
1001
1002 # Save commit message in case this transaction gets rolled back
1002 # Save commit message in case this transaction gets rolled back
1003 # (e.g. by a pretxncommit hook). Leave the content alone on
1003 # (e.g. by a pretxncommit hook). Leave the content alone on
1004 # the assumption that the user will use the same editor again.
1004 # the assumption that the user will use the same editor again.
1005 msgfile = self.opener('last-message.txt', 'wb')
1005 msgfile = self.opener('last-message.txt', 'wb')
1006 msgfile.write(cctx._text)
1006 msgfile.write(cctx._text)
1007 msgfile.close()
1007 msgfile.close()
1008
1008
1009 p1, p2 = self.dirstate.parents()
1009 p1, p2 = self.dirstate.parents()
1010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1011 try:
1011 try:
1012 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1012 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1013 ret = self.commitctx(cctx, True)
1013 ret = self.commitctx(cctx, True)
1014 except:
1014 except:
1015 if edited:
1015 if edited:
1016 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1016 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1017 self.ui.write(
1017 self.ui.write(
1018 _('note: commit message saved in %s\n') % msgfn)
1018 _('note: commit message saved in %s\n') % msgfn)
1019 raise
1019 raise
1020
1020
1021 # update bookmarks, dirstate and mergestate
1021 # update bookmarks, dirstate and mergestate
1022 bookmarks.update(self, p1, ret)
1022 bookmarks.update(self, p1, ret)
1023 for f in changes[0] + changes[1]:
1023 for f in changes[0] + changes[1]:
1024 self.dirstate.normal(f)
1024 self.dirstate.normal(f)
1025 for f in changes[2]:
1025 for f in changes[2]:
1026 self.dirstate.forget(f)
1026 self.dirstate.forget(f)
1027 self.dirstate.setparents(ret)
1027 self.dirstate.setparents(ret)
1028 ms.reset()
1028 ms.reset()
1029 finally:
1029 finally:
1030 wlock.release()
1030 wlock.release()
1031
1031
1032 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1032 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1033 return ret
1033 return ret
1034
1034
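For orientation, a minimal caller-side sketch of driving commit() through the localrepository API, assuming the hg.repository()/ui() entry points of this era; the repo path, file name, and user are hypothetical:

    from mercurial import ui as uimod, hg
    from mercurial import match as matchmod

    repo = hg.repository(uimod.ui(), '.')           # open the working repo
    m = matchmod.match(repo.root, '', ['foo.txt'])  # limit commit to foo.txt
    node = repo.commit(text='fix foo', user='someone <s@example.org>', match=m)
    if node is None:
        print 'nothing to commit'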
1035 def commitctx(self, ctx, error=False):
1035 def commitctx(self, ctx, error=False):
1036 """Add a new revision to current repository.
1036 """Add a new revision to current repository.
1037 Revision information is passed via the context argument.
1037 Revision information is passed via the context argument.
1038 """
1038 """
1039
1039
1040 tr = lock = None
1040 tr = lock = None
1041 removed = list(ctx.removed())
1041 removed = list(ctx.removed())
1042 p1, p2 = ctx.p1(), ctx.p2()
1042 p1, p2 = ctx.p1(), ctx.p2()
1043 m1 = p1.manifest().copy()
1043 m1 = p1.manifest().copy()
1044 m2 = p2.manifest()
1044 m2 = p2.manifest()
1045 user = ctx.user()
1045 user = ctx.user()
1046
1046
1047 lock = self.lock()
1047 lock = self.lock()
1048 try:
1048 try:
1049 tr = self.transaction("commit")
1049 tr = self.transaction("commit")
1050 trp = weakref.proxy(tr)
1050 trp = weakref.proxy(tr)
1051
1051
1052 # check in files
1052 # check in files
1053 new = {}
1053 new = {}
1054 changed = []
1054 changed = []
1055 linkrev = len(self)
1055 linkrev = len(self)
1056 for f in sorted(ctx.modified() + ctx.added()):
1056 for f in sorted(ctx.modified() + ctx.added()):
1057 self.ui.note(f + "\n")
1057 self.ui.note(f + "\n")
1058 try:
1058 try:
1059 fctx = ctx[f]
1059 fctx = ctx[f]
1060 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1060 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1061 changed)
1061 changed)
1062 m1.set(f, fctx.flags())
1062 m1.set(f, fctx.flags())
1063 except OSError, inst:
1063 except OSError, inst:
1064 self.ui.warn(_("trouble committing %s!\n") % f)
1064 self.ui.warn(_("trouble committing %s!\n") % f)
1065 raise
1065 raise
1066 except IOError, inst:
1066 except IOError, inst:
1067 errcode = getattr(inst, 'errno', errno.ENOENT)
1067 errcode = getattr(inst, 'errno', errno.ENOENT)
1068 if error or errcode and errcode != errno.ENOENT:
1068 if error or errcode and errcode != errno.ENOENT:
1069 self.ui.warn(_("trouble committing %s!\n") % f)
1069 self.ui.warn(_("trouble committing %s!\n") % f)
1070 raise
1070 raise
1071 else:
1071 else:
1072 removed.append(f)
1072 removed.append(f)
1073
1073
1074 # update manifest
1074 # update manifest
1075 m1.update(new)
1075 m1.update(new)
1076 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1076 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1077 drop = [f for f in removed if f in m1]
1077 drop = [f for f in removed if f in m1]
1078 for f in drop:
1078 for f in drop:
1079 del m1[f]
1079 del m1[f]
1080 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1080 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1081 p2.manifestnode(), (new, drop))
1081 p2.manifestnode(), (new, drop))
1082
1082
1083 # update changelog
1083 # update changelog
1084 self.changelog.delayupdate()
1084 self.changelog.delayupdate()
1085 n = self.changelog.add(mn, changed + removed, ctx.description(),
1085 n = self.changelog.add(mn, changed + removed, ctx.description(),
1086 trp, p1.node(), p2.node(),
1086 trp, p1.node(), p2.node(),
1087 user, ctx.date(), ctx.extra().copy())
1087 user, ctx.date(), ctx.extra().copy())
1088 p = lambda: self.changelog.writepending() and self.root or ""
1088 p = lambda: self.changelog.writepending() and self.root or ""
1089 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1089 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1090 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1090 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1091 parent2=xp2, pending=p)
1091 parent2=xp2, pending=p)
1092 self.changelog.finalize(trp)
1092 self.changelog.finalize(trp)
1093 tr.close()
1093 tr.close()
1094
1094
1095 if self._branchcache:
1095 if self._branchcache:
1096 self.updatebranchcache()
1096 self.updatebranchcache()
1097 return n
1097 return n
1098 finally:
1098 finally:
1099 if tr:
1099 if tr:
1100 tr.release()
1100 tr.release()
1101 lock.release()
1101 lock.release()
1102
1102
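A hedged restatement of the write ordering commitctx() relies on, since the delayed-changelog trick is easy to miss:

    # 1. filelog.add(...)   -> one revision per changed file, each stamped
    #                          with linkrev = len(changelog) (changeset-to-be)
    # 2. manifest.add(...)  -> manifest node mn referencing those filenodes
    # 3. changelog.add(mn)  -> written last, and via delayupdate()/finalize(),
    #                          so concurrent readers never see file or
    #                          manifest data without its changeset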
1103 def destroyed(self):
1103 def destroyed(self):
1104 '''Inform the repository that nodes have been destroyed.
1104 '''Inform the repository that nodes have been destroyed.
1105 Intended for use by strip and rollback, so there's a common
1105 Intended for use by strip and rollback, so there's a common
1106 place for anything that has to be done after destroying history.'''
1106 place for anything that has to be done after destroying history.'''
1107 # XXX it might be nice if we could take the list of destroyed
1107 # XXX it might be nice if we could take the list of destroyed
1108 # nodes, but I don't see an easy way for rollback() to do that
1108 # nodes, but I don't see an easy way for rollback() to do that
1109
1109
1110 # Ensure the persistent tag cache is updated. Doing it now
1110 # Ensure the persistent tag cache is updated. Doing it now
1111 # means that the tag cache only has to worry about destroyed
1111 # means that the tag cache only has to worry about destroyed
1112 # heads immediately after a strip/rollback. That in turn
1112 # heads immediately after a strip/rollback. That in turn
1113 # guarantees that "cachetip == currenttip" (comparing both rev
1113 # guarantees that "cachetip == currenttip" (comparing both rev
1114 # and node) always means no nodes have been added or destroyed.
1114 # and node) always means no nodes have been added or destroyed.
1115
1115
1116 # XXX this is suboptimal when qrefresh'ing: we strip the current
1116 # XXX this is suboptimal when qrefresh'ing: we strip the current
1117 # head, refresh the tag cache, then immediately add a new head.
1117 # head, refresh the tag cache, then immediately add a new head.
1118 # But I think doing it this way is necessary for the "instant
1118 # But I think doing it this way is necessary for the "instant
1119 # tag cache retrieval" case to work.
1119 # tag cache retrieval" case to work.
1120 self.invalidatecaches()
1120 self.invalidatecaches()
1121
1121
1122 def walk(self, match, node=None):
1122 def walk(self, match, node=None):
1123 '''
1123 '''
1124 walk recursively through the directory tree or a given
1124 walk recursively through the directory tree or a given
1125 changeset, finding all files matched by the match
1125 changeset, finding all files matched by the match
1126 function
1126 function
1127 '''
1127 '''
1128 return self[node].walk(match)
1128 return self[node].walk(match)
1129
1129
1130 def status(self, node1='.', node2=None, match=None,
1130 def status(self, node1='.', node2=None, match=None,
1131 ignored=False, clean=False, unknown=False,
1131 ignored=False, clean=False, unknown=False,
1132 listsubrepos=False):
1132 listsubrepos=False):
1133 """return status of files between two nodes or node and working directory
1133 """return status of files between two nodes or node and working directory
1134
1134
1135 If node1 is None, use the first dirstate parent instead.
1135 If node1 is None, use the first dirstate parent instead.
1136 If node2 is None, compare node1 with working directory.
1136 If node2 is None, compare node1 with working directory.
1137 """
1137 """
1138
1138
1139 def mfmatches(ctx):
1139 def mfmatches(ctx):
1140 mf = ctx.manifest().copy()
1140 mf = ctx.manifest().copy()
1141 for fn in mf.keys():
1141 for fn in mf.keys():
1142 if not match(fn):
1142 if not match(fn):
1143 del mf[fn]
1143 del mf[fn]
1144 return mf
1144 return mf
1145
1145
1146 if isinstance(node1, context.changectx):
1146 if isinstance(node1, context.changectx):
1147 ctx1 = node1
1147 ctx1 = node1
1148 else:
1148 else:
1149 ctx1 = self[node1]
1149 ctx1 = self[node1]
1150 if isinstance(node2, context.changectx):
1150 if isinstance(node2, context.changectx):
1151 ctx2 = node2
1151 ctx2 = node2
1152 else:
1152 else:
1153 ctx2 = self[node2]
1153 ctx2 = self[node2]
1154
1154
1155 working = ctx2.rev() is None
1155 working = ctx2.rev() is None
1156 parentworking = working and ctx1 == self['.']
1156 parentworking = working and ctx1 == self['.']
1157 match = match or matchmod.always(self.root, self.getcwd())
1157 match = match or matchmod.always(self.root, self.getcwd())
1158 listignored, listclean, listunknown = ignored, clean, unknown
1158 listignored, listclean, listunknown = ignored, clean, unknown
1159
1159
1160 # load earliest manifest first for caching reasons
1160 # load earliest manifest first for caching reasons
1161 if not working and ctx2.rev() < ctx1.rev():
1161 if not working and ctx2.rev() < ctx1.rev():
1162 ctx2.manifest()
1162 ctx2.manifest()
1163
1163
1164 if not parentworking:
1164 if not parentworking:
1165 def bad(f, msg):
1165 def bad(f, msg):
1166 if f not in ctx1:
1166 if f not in ctx1:
1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1167 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1168 match.bad = bad
1168 match.bad = bad
1169
1169
1170 if working: # we need to scan the working dir
1170 if working: # we need to scan the working dir
1171 subrepos = []
1171 subrepos = []
1172 if '.hgsub' in self.dirstate:
1172 if '.hgsub' in self.dirstate:
1173 subrepos = ctx1.substate.keys()
1173 subrepos = ctx1.substate.keys()
1174 s = self.dirstate.status(match, subrepos, listignored,
1174 s = self.dirstate.status(match, subrepos, listignored,
1175 listclean, listunknown)
1175 listclean, listunknown)
1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1176 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1177
1177
1178 # check for any possibly clean files
1178 # check for any possibly clean files
1179 if parentworking and cmp:
1179 if parentworking and cmp:
1180 fixup = []
1180 fixup = []
1181 # do a full compare of any files that might have changed
1181 # do a full compare of any files that might have changed
1182 for f in sorted(cmp):
1182 for f in sorted(cmp):
1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1183 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1184 or ctx1[f].cmp(ctx2[f])):
1184 or ctx1[f].cmp(ctx2[f])):
1185 modified.append(f)
1185 modified.append(f)
1186 else:
1186 else:
1187 fixup.append(f)
1187 fixup.append(f)
1188
1188
1189 # update dirstate for files that are actually clean
1189 # update dirstate for files that are actually clean
1190 if fixup:
1190 if fixup:
1191 if listclean:
1191 if listclean:
1192 clean += fixup
1192 clean += fixup
1193
1193
1194 try:
1194 try:
1195 # updating the dirstate is optional
1195 # updating the dirstate is optional
1196 # so we don't wait on the lock
1196 # so we don't wait on the lock
1197 wlock = self.wlock(False)
1197 wlock = self.wlock(False)
1198 try:
1198 try:
1199 for f in fixup:
1199 for f in fixup:
1200 self.dirstate.normal(f)
1200 self.dirstate.normal(f)
1201 finally:
1201 finally:
1202 wlock.release()
1202 wlock.release()
1203 except error.LockError:
1203 except error.LockError:
1204 pass
1204 pass
1205
1205
1206 if not parentworking:
1206 if not parentworking:
1207 mf1 = mfmatches(ctx1)
1207 mf1 = mfmatches(ctx1)
1208 if working:
1208 if working:
1209 # we are comparing working dir against non-parent
1209 # we are comparing working dir against non-parent
1210 # generate a pseudo-manifest for the working dir
1210 # generate a pseudo-manifest for the working dir
1211 mf2 = mfmatches(self['.'])
1211 mf2 = mfmatches(self['.'])
1212 for f in cmp + modified + added:
1212 for f in cmp + modified + added:
1213 mf2[f] = None
1213 mf2[f] = None
1214 mf2.set(f, ctx2.flags(f))
1214 mf2.set(f, ctx2.flags(f))
1215 for f in removed:
1215 for f in removed:
1216 if f in mf2:
1216 if f in mf2:
1217 del mf2[f]
1217 del mf2[f]
1218 else:
1218 else:
1219 # we are comparing two revisions
1219 # we are comparing two revisions
1220 deleted, unknown, ignored = [], [], []
1220 deleted, unknown, ignored = [], [], []
1221 mf2 = mfmatches(ctx2)
1221 mf2 = mfmatches(ctx2)
1222
1222
1223 modified, added, clean = [], [], []
1223 modified, added, clean = [], [], []
1224 for fn in mf2:
1224 for fn in mf2:
1225 if fn in mf1:
1225 if fn in mf1:
1226 if (mf1.flags(fn) != mf2.flags(fn) or
1226 if (mf1.flags(fn) != mf2.flags(fn) or
1227 (mf1[fn] != mf2[fn] and
1227 (mf1[fn] != mf2[fn] and
1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1228 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1229 modified.append(fn)
1229 modified.append(fn)
1230 elif listclean:
1230 elif listclean:
1231 clean.append(fn)
1231 clean.append(fn)
1232 del mf1[fn]
1232 del mf1[fn]
1233 else:
1233 else:
1234 added.append(fn)
1234 added.append(fn)
1235 removed = mf1.keys()
1235 removed = mf1.keys()
1236
1236
1237 r = modified, added, removed, deleted, unknown, ignored, clean
1237 r = modified, added, removed, deleted, unknown, ignored, clean
1238
1238
1239 if listsubrepos:
1239 if listsubrepos:
1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1240 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1241 if working:
1241 if working:
1242 rev2 = None
1242 rev2 = None
1243 else:
1243 else:
1244 rev2 = ctx2.substate[subpath][1]
1244 rev2 = ctx2.substate[subpath][1]
1245 try:
1245 try:
1246 submatch = matchmod.narrowmatcher(subpath, match)
1246 submatch = matchmod.narrowmatcher(subpath, match)
1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1247 s = sub.status(rev2, match=submatch, ignored=listignored,
1248 clean=listclean, unknown=listunknown,
1248 clean=listclean, unknown=listunknown,
1249 listsubrepos=True)
1249 listsubrepos=True)
1250 for rfiles, sfiles in zip(r, s):
1250 for rfiles, sfiles in zip(r, s):
1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1251 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1252 except error.LookupError:
1252 except error.LookupError:
1253 self.ui.status(_("skipping missing subrepository: %s\n")
1253 self.ui.status(_("skipping missing subrepository: %s\n")
1254 % subpath)
1254 % subpath)
1255
1255
1256 for l in r:
1256 for l in r:
1257 l.sort()
1257 l.sort()
1258 return r
1258 return r
1259
1259
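The seven lists come back in a fixed order; a small usage sketch (reusing the hypothetical repo object opened in the commit() sketch above):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    for f in modified:
        print 'M %s' % f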
1260 def heads(self, start=None):
1260 def heads(self, start=None):
1261 heads = self.changelog.heads(start)
1261 heads = self.changelog.heads(start)
1262 # sort the output in rev descending order
1262 # sort the output in rev descending order
1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1263 return sorted(heads, key=self.changelog.rev, reverse=True)
1264
1264
1265 def branchheads(self, branch=None, start=None, closed=False):
1265 def branchheads(self, branch=None, start=None, closed=False):
1266 '''return a (possibly filtered) list of heads for the given branch
1266 '''return a (possibly filtered) list of heads for the given branch
1267
1267
1268 Heads are returned in topological order, from newest to oldest.
1268 Heads are returned in topological order, from newest to oldest.
1269 If branch is None, use the dirstate branch.
1269 If branch is None, use the dirstate branch.
1270 If start is not None, return only heads reachable from start.
1270 If start is not None, return only heads reachable from start.
1271 If closed is True, return heads that are marked as closed as well.
1271 If closed is True, return heads that are marked as closed as well.
1272 '''
1272 '''
1273 if branch is None:
1273 if branch is None:
1274 branch = self[None].branch()
1274 branch = self[None].branch()
1275 branches = self.branchmap()
1275 branches = self.branchmap()
1276 if branch not in branches:
1276 if branch not in branches:
1277 return []
1277 return []
1278 # the cache returns heads ordered lowest to highest
1278 # the cache returns heads ordered lowest to highest
1279 bheads = list(reversed(branches[branch]))
1279 bheads = list(reversed(branches[branch]))
1280 if start is not None:
1280 if start is not None:
1281 # filter out the heads that cannot be reached from startrev
1281 # filter out the heads that cannot be reached from startrev
1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1283 bheads = [h for h in bheads if h in fbheads]
1283 bheads = [h for h in bheads if h in fbheads]
1284 if not closed:
1284 if not closed:
1285 bheads = [h for h in bheads if
1285 bheads = [h for h in bheads if
1286 ('close' not in self.changelog.read(h)[5])]
1286 ('close' not in self.changelog.read(h)[5])]
1287 return bheads
1287 return bheads
1288
1288
1289 def branches(self, nodes):
1289 def branches(self, nodes):
1290 if not nodes:
1290 if not nodes:
1291 nodes = [self.changelog.tip()]
1291 nodes = [self.changelog.tip()]
1292 b = []
1292 b = []
1293 for n in nodes:
1293 for n in nodes:
1294 t = n
1294 t = n
1295 while 1:
1295 while 1:
1296 p = self.changelog.parents(n)
1296 p = self.changelog.parents(n)
1297 if p[1] != nullid or p[0] == nullid:
1297 if p[1] != nullid or p[0] == nullid:
1298 b.append((t, n, p[0], p[1]))
1298 b.append((t, n, p[0], p[1]))
1299 break
1299 break
1300 n = p[0]
1300 n = p[0]
1301 return b
1301 return b
1302
1302
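A hedged reading of branches(): for each requested node it follows first parents down to the bottom of the linear run, presumably in service of the legacy discovery wire protocol:

    # t      -> the node the caller asked about (top of the linear run)
    # n      -> first ancestor that is a merge (p[1] != nullid) or a
    #           root (p[0] == nullid)
    # result -> one (t, n, p[0], p[1]) tuple per requested node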
1303 def between(self, pairs):
1303 def between(self, pairs):
1304 r = []
1304 r = []
1305
1305
1306 for top, bottom in pairs:
1306 for top, bottom in pairs:
1307 n, l, i = top, [], 0
1307 n, l, i = top, [], 0
1308 f = 1
1308 f = 1
1309
1309
1310 while n != bottom and n != nullid:
1310 while n != bottom and n != nullid:
1311 p = self.changelog.parents(n)[0]
1311 p = self.changelog.parents(n)[0]
1312 if i == f:
1312 if i == f:
1313 l.append(n)
1313 l.append(n)
1314 f = f * 2
1314 f = f * 2
1315 n = p
1315 n = p
1316 i += 1
1316 i += 1
1317
1317
1318 r.append(l)
1318 r.append(l)
1319
1319
1320 return r
1320 return r
1321
1321
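between() samples each top..bottom range at exponentially growing distances; a worked illustration for a linear chain (assumed topology):

    # chain: top = c0 <- c1 <- c2 <- c3 <- ... <- bottom
    # appended: c1 (i == 1), c2 (i == 2), c4 (i == 4), c8, ...
    # so each returned list has O(log n) entries, enough to bisect the
    # range cheaply during discovery.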
1322 def pull(self, remote, heads=None, force=False):
1322 def pull(self, remote, heads=None, force=False):
1323 lock = self.lock()
1323 lock = self.lock()
1324 try:
1324 try:
1325 usecommon = remote.capable('getbundle')
1325 usecommon = remote.capable('getbundle')
1326 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1326 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1327 force=force, commononly=usecommon)
1327 force=force, commononly=usecommon)
1328 common, fetch, rheads = tmp
1328 common, fetch, rheads = tmp
1329 if not fetch:
1329 if not fetch:
1330 self.ui.status(_("no changes found\n"))
1330 self.ui.status(_("no changes found\n"))
1331 result = 0
1331 result = 0
1332 else:
1332 else:
1333 if heads is None and list(common) == [nullid]:
1333 if heads is None and list(common) == [nullid]:
1334 self.ui.status(_("requesting all changes\n"))
1334 self.ui.status(_("requesting all changes\n"))
1335 elif heads is None and remote.capable('changegroupsubset'):
1335 elif heads is None and remote.capable('changegroupsubset'):
1336 # issue1320, avoid a race if remote changed after discovery
1336 # issue1320, avoid a race if remote changed after discovery
1337 heads = rheads
1337 heads = rheads
1338
1338
1339 if usecommon:
1339 if usecommon:
1340 cg = remote.getbundle('pull', common=common,
1340 cg = remote.getbundle('pull', common=common,
1341 heads=heads or rheads)
1341 heads=heads or rheads)
1342 elif heads is None:
1342 elif heads is None:
1343 cg = remote.changegroup(fetch, 'pull')
1343 cg = remote.changegroup(fetch, 'pull')
1344 elif not remote.capable('changegroupsubset'):
1344 elif not remote.capable('changegroupsubset'):
1345 raise util.Abort(_("partial pull cannot be done because "
1345 raise util.Abort(_("partial pull cannot be done because "
1346 "other repository doesn't support "
1346 "other repository doesn't support "
1347 "changegroupsubset."))
1347 "changegroupsubset."))
1348 else:
1348 else:
1349 cg = remote.changegroupsubset(fetch, heads, 'pull')
1349 cg = remote.changegroupsubset(fetch, heads, 'pull')
1350 result = self.addchangegroup(cg, 'pull', remote.url(),
1350 result = self.addchangegroup(cg, 'pull', remote.url(),
1351 lock=lock)
1351 lock=lock)
1352 finally:
1352 finally:
1353 lock.release()
1353 lock.release()
1354
1354
1355 return result
1355 return result
1356
1356
1357 def checkpush(self, force, revs):
1357 def checkpush(self, force, revs):
1358 """Extensions can override this function if additional checks have
1358 """Extensions can override this function if additional checks have
1359 to be performed before pushing, or call it if they override push
1359 to be performed before pushing, or call it if they override push
1360 command.
1360 command.
1361 """
1361 """
1362 pass
1362 pass
1363
1363
1364 def push(self, remote, force=False, revs=None, newbranch=False):
1364 def push(self, remote, force=False, revs=None, newbranch=False):
1365 '''Push outgoing changesets (limited by revs) from the current
1365 '''Push outgoing changesets (limited by revs) from the current
1366 repository to remote. Return an integer:
1366 repository to remote. Return an integer:
1367 - 0 means HTTP error *or* nothing to push
1367 - 0 means HTTP error *or* nothing to push
1368 - 1 means we pushed and remote head count is unchanged *or*
1368 - 1 means we pushed and remote head count is unchanged *or*
1369 we have outgoing changesets but refused to push
1369 we have outgoing changesets but refused to push
1370 - other values as described by addchangegroup()
1370 - other values as described by addchangegroup()
1371 '''
1371 '''
1372 # there are two ways to push to remote repo:
1372 # there are two ways to push to remote repo:
1373 #
1373 #
1374 # addchangegroup assumes local user can lock remote
1374 # addchangegroup assumes local user can lock remote
1375 # repo (local filesystem, old ssh servers).
1375 # repo (local filesystem, old ssh servers).
1376 #
1376 #
1377 # unbundle assumes local user cannot lock remote repo (new ssh
1377 # unbundle assumes local user cannot lock remote repo (new ssh
1378 # servers, http servers).
1378 # servers, http servers).
1379
1379
1380 self.checkpush(force, revs)
1380 self.checkpush(force, revs)
1381 lock = None
1381 lock = None
1382 unbundle = remote.capable('unbundle')
1382 unbundle = remote.capable('unbundle')
1383 if not unbundle:
1383 if not unbundle:
1384 lock = remote.lock()
1384 lock = remote.lock()
1385 try:
1385 try:
1386 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1386 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1387 newbranch)
1387 newbranch)
1388 ret = remote_heads
1388 ret = remote_heads
1389 if cg is not None:
1389 if cg is not None:
1390 if unbundle:
1390 if unbundle:
1391 # local repo finds heads on server, finds out what
1391 # local repo finds heads on server, finds out what
1392 # revs it must push. Once revs are transferred, if server
1392 # revs it must push. Once revs are transferred, if server
1393 # finds it has different heads (someone else won
1393 # finds it has different heads (someone else won
1394 # commit/push race), server aborts.
1394 # commit/push race), server aborts.
1395 if force:
1395 if force:
1396 remote_heads = ['force']
1396 remote_heads = ['force']
1397 # ssh: return remote's addchangegroup()
1397 # ssh: return remote's addchangegroup()
1398 # http: return remote's addchangegroup() or 0 for error
1398 # http: return remote's addchangegroup() or 0 for error
1399 ret = remote.unbundle(cg, remote_heads, 'push')
1399 ret = remote.unbundle(cg, remote_heads, 'push')
1400 else:
1400 else:
1401 # we return an integer indicating remote head count change
1401 # we return an integer indicating remote head count change
1402 ret = remote.addchangegroup(cg, 'push', self.url(),
1402 ret = remote.addchangegroup(cg, 'push', self.url(),
1403 lock=lock)
1403 lock=lock)
1404 finally:
1404 finally:
1405 if lock is not None:
1405 if lock is not None:
1406 lock.release()
1406 lock.release()
1407
1407
1408 self.ui.debug("checking for updated bookmarks\n")
1408 self.ui.debug("checking for updated bookmarks\n")
1409 rb = remote.listkeys('bookmarks')
1409 rb = remote.listkeys('bookmarks')
1410 for k in rb.keys():
1410 for k in rb.keys():
1411 if k in self._bookmarks:
1411 if k in self._bookmarks:
1412 nr, nl = rb[k], hex(self._bookmarks[k])
1412 nr, nl = rb[k], hex(self._bookmarks[k])
1413 if nr in self:
1413 if nr in self:
1414 cr = self[nr]
1414 cr = self[nr]
1415 cl = self[nl]
1415 cl = self[nl]
1416 if cl in cr.descendants():
1416 if cl in cr.descendants():
1417 r = remote.pushkey('bookmarks', k, nr, nl)
1417 r = remote.pushkey('bookmarks', k, nr, nl)
1418 if r:
1418 if r:
1419 self.ui.status(_("updating bookmark %s\n") % k)
1419 self.ui.status(_("updating bookmark %s\n") % k)
1420 else:
1420 else:
1421 self.ui.warn(_('updating bookmark %s'
1421 self.ui.warn(_('updating bookmark %s'
1422 ' failed!\n') % k)
1422 ' failed!\n') % k)
1423
1423
1424 return ret
1424 return ret
1425
1425
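A minimal caller-side sketch of acting on push()'s documented return codes (hypothetical URL, no error handling):

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')
    remote = hg.repository(repo.ui, 'http://hg.example.org/repo')  # hypothetical
    ret = repo.push(remote)
    if ret == 0:
        print 'push failed (HTTP error) or nothing to push'
    elif ret == 1:
        print 'pushed; remote head count unchanged, or push was refused'
    else:
        print 'pushed; addchangegroup() reported %d' % ret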
1426 def changegroupinfo(self, nodes, source):
1426 def changegroupinfo(self, nodes, source):
1427 if self.ui.verbose or source == 'bundle':
1427 if self.ui.verbose or source == 'bundle':
1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1428 self.ui.status(_("%d changesets found\n") % len(nodes))
1429 if self.ui.debugflag:
1429 if self.ui.debugflag:
1430 self.ui.debug("list of changesets:\n")
1430 self.ui.debug("list of changesets:\n")
1431 for node in nodes:
1431 for node in nodes:
1432 self.ui.debug("%s\n" % hex(node))
1432 self.ui.debug("%s\n" % hex(node))
1433
1433
1434 def changegroupsubset(self, bases, heads, source):
1434 def changegroupsubset(self, bases, heads, source):
1435 """Compute a changegroup consisting of all the nodes that are
1435 """Compute a changegroup consisting of all the nodes that are
1436 descendants of any of the bases and ancestors of any of the heads.
1436 descendants of any of the bases and ancestors of any of the heads.
1437 Return a chunkbuffer object whose read() method will return
1437 Return a chunkbuffer object whose read() method will return
1438 successive changegroup chunks.
1438 successive changegroup chunks.
1439
1439
1440 It is fairly complex as determining which filenodes and which
1440 It is fairly complex as determining which filenodes and which
1441 manifest nodes need to be included for the changeset to be complete
1441 manifest nodes need to be included for the changeset to be complete
1442 is non-trivial.
1442 is non-trivial.
1443
1443
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1444 Another wrinkle is doing the reverse, figuring out which changeset in
1445 the changegroup a particular filenode or manifestnode belongs to.
1445 the changegroup a particular filenode or manifestnode belongs to.
1446 """
1446 """
1447 cl = self.changelog
1447 cl = self.changelog
1448 if not bases:
1448 if not bases:
1449 bases = [nullid]
1449 bases = [nullid]
1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1450 csets, bases, heads = cl.nodesbetween(bases, heads)
1451 # We assume that all ancestors of bases are known
1451 # We assume that all ancestors of bases are known
1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1452 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1453 return self._changegroupsubset(common, csets, heads, source)
1453 return self._changegroupsubset(common, csets, heads, source)
1454
1454
1455 def getbundle(self, source, heads=None, common=None):
1455 def getbundle(self, source, heads=None, common=None):
1456 """Like changegroupsubset, but returns the set difference between the
1456 """Like changegroupsubset, but returns the set difference between the
1457 ancestors of heads and the ancestors of common.
1457 ancestors of heads and the ancestors of common.
1458
1458
1459 If heads is None, use the local heads. If common is None, use [nullid].
1459 If heads is None, use the local heads. If common is None, use [nullid].
1460
1460
1461 The nodes in common might not all be known locally due to the way the
1461 The nodes in common might not all be known locally due to the way the
1462 current discovery protocol works.
1462 current discovery protocol works.
1463 """
1463 """
1464 cl = self.changelog
1464 cl = self.changelog
1465 if common:
1465 if common:
1466 nm = cl.nodemap
1466 nm = cl.nodemap
1467 common = [n for n in common if n in nm]
1467 common = [n for n in common if n in nm]
1468 else:
1468 else:
1469 common = [nullid]
1469 common = [nullid]
1470 if not heads:
1470 if not heads:
1471 heads = cl.heads()
1471 heads = cl.heads()
1472 common, missing = cl.findcommonmissing(common, heads)
1472 common, missing = cl.findcommonmissing(common, heads)
1473 return self._changegroupsubset(common, missing, heads, source)
1473 return self._changegroupsubset(common, missing, heads, source)
1474
1474
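In set terms, the bundle assembled by getbundle() contains exactly:

    # missing = ancestors(heads) - ancestors(common)
    # (ancestors taken inclusively), which is the second value returned
    # by cl.findcommonmissing(common, heads) above.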
1475 def _changegroupsubset(self, commonrevs, csets, heads, source):
1475 def _changegroupsubset(self, commonrevs, csets, heads, source):
1476
1476
1477 cl = self.changelog
1477 cl = self.changelog
1478 mf = self.manifest
1478 mf = self.manifest
1479 mfs = {} # needed manifests
1479 mfs = {} # needed manifests
1480 fnodes = {} # needed file nodes
1480 fnodes = {} # needed file nodes
1481
1481
1482 # can we go through the fast path ?
1482 # can we go through the fast path ?
1483 heads.sort()
1483 heads.sort()
1484 if heads == sorted(self.heads()):
1484 if heads == sorted(self.heads()):
1485 return self._changegroup(csets, source)
1485 return self._changegroup(csets, source)
1486
1486
1487 # slow path
1487 # slow path
1488 self.hook('preoutgoing', throw=True, source=source)
1488 self.hook('preoutgoing', throw=True, source=source)
1489 self.changegroupinfo(csets, source)
1489 self.changegroupinfo(csets, source)
1490
1490
1491 # If we determine that a particular file or manifest node must be a
1491 # If we determine that a particular file or manifest node must be a
1492 # node that the recipient of the changegroup will already have, we can
1492 # node that the recipient of the changegroup will already have, we can
1493 # also assume the recipient will have all the parents. This function
1493 # also assume the recipient will have all the parents. This function
1494 # prunes them from the set of missing nodes.
1494 # prunes them from the set of missing nodes.
1495 def prune(revlog, missingnodes):
1495 def prune(revlog, missingnodes):
1496 # drop any nodes that claim to be part of a cset in commonrevs
1496 # drop any nodes that claim to be part of a cset in commonrevs
1497 drop = set()
1497 drop = set()
1498 for n in missingnodes:
1498 for n in missingnodes:
1499 if revlog.linkrev(revlog.rev(n)) in commonrevs:
1499 if revlog.linkrev(revlog.rev(n)) in commonrevs:
1500 drop.add(n)
1500 drop.add(n)
1501 for n in drop:
1501 for n in drop:
1502 missingnodes.pop(n, None)
1502 missingnodes.pop(n, None)
1503
1503
1504 # Now that we have all these utility functions to help out and
1504 # Now that we have all these utility functions to help out and
1505 # logically divide up the task, generate the group.
1505 # logically divide up the task, generate the group.
1506 def gengroup():
1506 def gengroup():
1507 # The set of changed files starts empty.
1507 # The set of changed files starts empty.
1508 changedfiles = set()
1508 changedfiles = set()
1509
1509
1510 count = [0]
1510 count = [0]
1511 def clookup(revlog, x):
1511 def clookup(revlog, x):
1512 c = cl.read(x)
1512 c = cl.read(x)
1513 changedfiles.update(c[3])
1513 changedfiles.update(c[3])
1514 mfs.setdefault(c[0], x)
1514 mfs.setdefault(c[0], x)
1515 count[0] += 1
1515 count[0] += 1
1516 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1516 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1517 return x
1517 return x
1518
1518
1519 # Create a changenode group generator that will call our functions
1519 # Create a changenode group generator that will call our functions
1520 # back to lookup the owning changenode and collect information.
1520 # back to lookup the owning changenode and collect information.
1521 for chunk in cl.group(csets, clookup):
1521 for chunk in cl.group(csets, clookup):
1522 yield chunk
1522 yield chunk
1523 changecount = count[0]
1523 changecount = count[0]
1524 efiles = len(changedfiles)
1524 efiles = len(changedfiles)
1525 self.ui.progress(_('bundling'), None)
1525 self.ui.progress(_('bundling'), None)
1526
1526
1527 prune(mf, mfs)
1527 prune(mf, mfs)
1528 # Create a generator for the manifestnodes that calls our lookup
1528 # Create a generator for the manifestnodes that calls our lookup
1529 # and data collection functions back.
1529 # and data collection functions back.
1530 count = [0]
1530 count = [0]
1531 def mlookup(revlog, x):
1531 def mlookup(revlog, x):
1532 clnode = mfs[x]
1532 clnode = mfs[x]
1533 mdata = mf.readfast(x)
1533 mdata = mf.readfast(x)
1534 for f in changedfiles:
1534 for f in changedfiles:
1535 if f in mdata:
1535 if f in mdata:
1536 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1536 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1537 count[0] += 1
1537 count[0] += 1
1538 self.ui.progress(_('bundling'), count[0],
1538 self.ui.progress(_('bundling'), count[0],
1539 unit=_('manifests'), total=changecount)
1539 unit=_('manifests'), total=changecount)
1540 return mfs[x]
1540 return mfs[x]
1541
1541
1542 for chunk in mf.group(mfs, mlookup):
1542 for chunk in mf.group(mfs, mlookup):
1543 yield chunk
1543 yield chunk
1544 self.ui.progress(_('bundling'), None)
1544 self.ui.progress(_('bundling'), None)
1545
1545
1546 mfs.clear()
1546 mfs.clear()
1547
1547
1548 # Go through all our files in order sorted by name.
1548 # Go through all our files in order sorted by name.
1549 for idx, fname in enumerate(sorted(changedfiles)):
1549 for idx, fname in enumerate(sorted(changedfiles)):
1550 filerevlog = self.file(fname)
1550 filerevlog = self.file(fname)
1551 if not len(filerevlog):
1551 if not len(filerevlog):
1552 raise util.Abort(_("empty or missing revlog for %s") % fname)
1552 raise util.Abort(_("empty or missing revlog for %s") % fname)
1553 # Toss out the filenodes that the recipient isn't really
1553 # Toss out the filenodes that the recipient isn't really
1554 # missing.
1554 # missing.
1555 missingfnodes = fnodes.pop(fname, {})
1555 missingfnodes = fnodes.pop(fname, {})
1556 prune(filerevlog, missingfnodes)
1556 prune(filerevlog, missingfnodes)
1557 # If any filenodes are left, generate the group for them,
1558 # otherwise don't bother.
1559 if missingfnodes:
1560 yield changegroup.chunkheader(len(fname))
1561 yield fname
1562 # Create a group generator and only pass in a changenode
1563 # lookup function as we need to collect no information
1564 # from filenodes.
1565 def flookup(revlog, x):
1566 # even though we print the same progress on
1567 # most loop iterations, put the progress call
1568 # here so that time estimates (if any) can be updated
1569 self.ui.progress(
1570 _('bundling'), idx, item=fname,
1571 unit=_('files'), total=efiles)
1572 return missingfnodes[x]
1573
1574 for chunk in filerevlog.group(missingfnodes, flookup):
1575 yield chunk
1557 first = True
1558
1559 def flookup(revlog, x):
1560 # even though we print the same progress on
1561 # most loop iterations, put the progress call
1562 # here so that time estimates (if any) can be updated
1563 self.ui.progress(
1564 _('bundling'), idx, item=fname,
1565 unit=_('files'), total=efiles)
1566 return missingfnodes[x]
1567
1568 for chunk in filerevlog.group(missingfnodes, flookup):
1569 if first:
1570 if chunk == changegroup.closechunk():
1571 break
1572 yield changegroup.chunkheader(len(fname))
1573 yield fname
1574 first = False
1575 yield chunk
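The rewritten loop above is the point of this changeset: the per-file header is now emitted lazily, keyed off the first chunk. Restated as comments:

    # peek at the first chunk from filerevlog.group():
    #   - if it is already the close chunk, the group is empty: emit
    #     nothing at all for this file and move on;
    #   - otherwise emit the header (chunkheader(len(fname)) + fname)
    #     exactly once, clear `first`, and stream the chunks as before.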
1576 # Signal that no more groups are left.
1576 # Signal that no more groups are left.
1577 yield changegroup.closechunk()
1577 yield changegroup.closechunk()
1578 self.ui.progress(_('bundling'), None)
1578 self.ui.progress(_('bundling'), None)
1579
1579
1580 if csets:
1580 if csets:
1581 self.hook('outgoing', node=hex(csets[0]), source=source)
1581 self.hook('outgoing', node=hex(csets[0]), source=source)
1582
1582
1583 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1583 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1584
1584
1585 def changegroup(self, basenodes, source):
1585 def changegroup(self, basenodes, source):
1586 # to avoid a race we use changegroupsubset() (issue1320)
1586 # to avoid a race we use changegroupsubset() (issue1320)
1587 return self.changegroupsubset(basenodes, self.heads(), source)
1587 return self.changegroupsubset(basenodes, self.heads(), source)
1588
1588
1589 def _changegroup(self, nodes, source):
1589 def _changegroup(self, nodes, source):
1590 """Compute the changegroup of all nodes that we have that a recipient
1590 """Compute the changegroup of all nodes that we have that a recipient
1591 doesn't. Return a chunkbuffer object whose read() method will return
1591 doesn't. Return a chunkbuffer object whose read() method will return
1592 successive changegroup chunks.
1592 successive changegroup chunks.
1593
1593
1594 This is much easier than the previous function as we can assume that
1594 This is much easier than the previous function as we can assume that
1595 the recipient has any changenode we aren't sending them.
1595 the recipient has any changenode we aren't sending them.
1596
1596
1597 nodes is the set of nodes to send"""
1597 nodes is the set of nodes to send"""
1598
1598
1599 self.hook('preoutgoing', throw=True, source=source)
1599 self.hook('preoutgoing', throw=True, source=source)
1600
1600
1601 cl = self.changelog
1601 cl = self.changelog
1602 revset = set([cl.rev(n) for n in nodes])
1602 revset = set([cl.rev(n) for n in nodes])
1603 self.changegroupinfo(nodes, source)
1603 self.changegroupinfo(nodes, source)
1604
1604
1605 def gennodelst(log):
1605 def gennodelst(log):
1606 for r in log:
1606 for r in log:
1607 if log.linkrev(r) in revset:
1607 if log.linkrev(r) in revset:
1608 yield log.node(r)
1608 yield log.node(r)
1609
1609
1610 def gengroup():
1610 def gengroup():
1611 '''yield a sequence of changegroup chunks (strings)'''
1611 '''yield a sequence of changegroup chunks (strings)'''
1612 # construct a list of all changed files
1612 # construct a list of all changed files
1613 changedfiles = set()
1613 changedfiles = set()
1614 mmfs = {}
1614 mmfs = {}
1615
1615
1616 count = [0]
1616 count = [0]
1617 def clookup(revlog, x):
1617 def clookup(revlog, x):
1618 c = cl.read(x)
1618 c = cl.read(x)
1619 changedfiles.update(c[3])
1619 changedfiles.update(c[3])
1620 mmfs.setdefault(c[0], x)
1620 mmfs.setdefault(c[0], x)
1621 count[0] += 1
1621 count[0] += 1
1622 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1622 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1623 return x
1623 return x
1624
1624
1625 for chunk in cl.group(nodes, clookup):
1625 for chunk in cl.group(nodes, clookup):
1626 yield chunk
1626 yield chunk
1627 efiles = len(changedfiles)
1627 efiles = len(changedfiles)
1628 changecount = count[0]
1628 changecount = count[0]
1629 self.ui.progress(_('bundling'), None)
1629 self.ui.progress(_('bundling'), None)
1630
1630
1631 mnfst = self.manifest
1631 mnfst = self.manifest
1632 nodeiter = gennodelst(mnfst)
1632 nodeiter = gennodelst(mnfst)
1633 count = [0]
1633 count = [0]
1634 def mlookup(revlog, x):
1634 def mlookup(revlog, x):
1635 count[0] += 1
1635 count[0] += 1
1636 self.ui.progress(_('bundling'), count[0],
1636 self.ui.progress(_('bundling'), count[0],
1637 unit=_('manifests'), total=changecount)
1637 unit=_('manifests'), total=changecount)
1638 return cl.node(revlog.linkrev(revlog.rev(x)))
1638 return cl.node(revlog.linkrev(revlog.rev(x)))
1639
1639
1640 for chunk in mnfst.group(nodeiter, mlookup):
1640 for chunk in mnfst.group(nodeiter, mlookup):
1641 yield chunk
1641 yield chunk
1642 self.ui.progress(_('bundling'), None)
1642 self.ui.progress(_('bundling'), None)
1643
1643
1644 for idx, fname in enumerate(sorted(changedfiles)):
1644 for idx, fname in enumerate(sorted(changedfiles)):
1645 filerevlog = self.file(fname)
1645 filerevlog = self.file(fname)
1646 if not len(filerevlog):
1646 if not len(filerevlog):
1647 raise util.Abort(_("empty or missing revlog for %s") % fname)
1647 raise util.Abort(_("empty or missing revlog for %s") % fname)
1648 nodeiter = gennodelst(filerevlog)
1649 nodeiter = list(nodeiter)
1650 if nodeiter:
1651 yield changegroup.chunkheader(len(fname))
1652 yield fname
1653 def flookup(revlog, x):
1654 self.ui.progress(
1655 _('bundling'), idx, item=fname,
1656 total=efiles, unit=_('files'))
1657 return cl.node(revlog.linkrev(revlog.rev(x)))
1658
1659 for chunk in filerevlog.group(nodeiter, flookup):
1660 yield chunk
1648 first = True
1649 nodeiter = gennodelst(filerevlog)
1650 def flookup(revlog, x):
1651 self.ui.progress(
1652 _('bundling'), idx, item=fname,
1653 total=efiles, unit=_('files'))
1654 return cl.node(revlog.linkrev(revlog.rev(x)))
1655
1656 for chunk in filerevlog.group(nodeiter, flookup):
1657 if first:
1658 if chunk == changegroup.closechunk():
1659 break
1660 yield changegroup.chunkheader(len(fname))
1661 yield fname
1662 first = False
1663 yield chunk
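The same lazy-header pattern is applied here in _changegroup(). A side effect worth noting:

    # before: nodeiter = list(nodeiter) materialized the generator just so
    #         `if nodeiter:` could decide whether to emit the header
    # after:  emptiness is detected from the first chunk, so gennodelst()
    #         stays a lazily consumed generator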
1661 self.ui.progress(_('bundling'), None)
1664 self.ui.progress(_('bundling'), None)
1662
1665
1663 yield changegroup.closechunk()
1666 yield changegroup.closechunk()
1664
1667
1665 if nodes:
1668 if nodes:
1666 self.hook('outgoing', node=hex(nodes[0]), source=source)
1669 self.hook('outgoing', node=hex(nodes[0]), source=source)
1667
1670
1668 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1671 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1669
1672
1670 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1673 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1671 """Add the changegroup returned by source.read() to this repo.
1674 """Add the changegroup returned by source.read() to this repo.
1672 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1675 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1673 the URL of the repo where this changegroup is coming from.
1676 the URL of the repo where this changegroup is coming from.
1674 If lock is not None, the function takes ownership of the lock
1677 If lock is not None, the function takes ownership of the lock
1675 and releases it after the changegroup is added.
1678 and releases it after the changegroup is added.
1676
1679
1677 Return an integer summarizing the change to this repo:
1680 Return an integer summarizing the change to this repo:
1678 - nothing changed or no source: 0
1681 - nothing changed or no source: 0
1679 - more heads than before: 1+added heads (2..n)
1682 - more heads than before: 1+added heads (2..n)
1680 - fewer heads than before: -1-removed heads (-2..-n)
1683 - fewer heads than before: -1-removed heads (-2..-n)
1681 - number of heads stays the same: 1
1684 - number of heads stays the same: 1
1682 """
1685 """
1683 def csmap(x):
1686 def csmap(x):
1684 self.ui.debug("add changeset %s\n" % short(x))
1687 self.ui.debug("add changeset %s\n" % short(x))
1685 return len(cl)
1688 return len(cl)
1686
1689
1687 def revmap(x):
1690 def revmap(x):
1688 return cl.rev(x)
1691 return cl.rev(x)
1689
1692
1690 if not source:
1693 if not source:
1691 return 0
1694 return 0
1692
1695
1693 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1696 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1694
1697
1695 changesets = files = revisions = 0
1698 changesets = files = revisions = 0
1696 efiles = set()
1699 efiles = set()
1697
1700
1698 # write changelog data to temp files so concurrent readers will not see
1701 # write changelog data to temp files so concurrent readers will not see
1699 # inconsistent view
1702 # inconsistent view
1700 cl = self.changelog
1703 cl = self.changelog
1701 cl.delayupdate()
1704 cl.delayupdate()
1702 oldheads = len(cl.heads())
1705 oldheads = len(cl.heads())
1703
1706
1704 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1707 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1705 try:
1708 try:
1706 trp = weakref.proxy(tr)
1709 trp = weakref.proxy(tr)
1707 # pull off the changeset group
1710 # pull off the changeset group
1708 self.ui.status(_("adding changesets\n"))
1711 self.ui.status(_("adding changesets\n"))
1709 clstart = len(cl)
1712 clstart = len(cl)
1710 class prog(object):
1713 class prog(object):
1711 step = _('changesets')
1714 step = _('changesets')
1712 count = 1
1715 count = 1
1713 ui = self.ui
1716 ui = self.ui
1714 total = None
1717 total = None
1715 def __call__(self):
1718 def __call__(self):
1716 self.ui.progress(self.step, self.count, unit=_('chunks'),
1719 self.ui.progress(self.step, self.count, unit=_('chunks'),
1717 total=self.total)
1720 total=self.total)
1718 self.count += 1
1721 self.count += 1
1719 pr = prog()
1722 pr = prog()
1720 source.callback = pr
1723 source.callback = pr
1721
1724
1722 if (cl.addgroup(source, csmap, trp) is None
1725 if (cl.addgroup(source, csmap, trp) is None
1723 and not emptyok):
1726 and not emptyok):
1724 raise util.Abort(_("received changelog group is empty"))
1727 raise util.Abort(_("received changelog group is empty"))
1725 clend = len(cl)
1728 clend = len(cl)
1726 changesets = clend - clstart
1729 changesets = clend - clstart
1727 for c in xrange(clstart, clend):
1730 for c in xrange(clstart, clend):
1728 efiles.update(self[c].files())
1731 efiles.update(self[c].files())
1729 efiles = len(efiles)
1732 efiles = len(efiles)
1730 self.ui.progress(_('changesets'), None)
1733 self.ui.progress(_('changesets'), None)
1731
1734
1732 # pull off the manifest group
1735 # pull off the manifest group
1733 self.ui.status(_("adding manifests\n"))
1736 self.ui.status(_("adding manifests\n"))
1734 pr.step = _('manifests')
1737 pr.step = _('manifests')
1735 pr.count = 1
1738 pr.count = 1
1736 pr.total = changesets # manifests <= changesets
1739 pr.total = changesets # manifests <= changesets
1737 # no need to check for empty manifest group here:
1740 # no need to check for empty manifest group here:
1738 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1741 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1739 # no new manifest will be created and the manifest group will
1742 # no new manifest will be created and the manifest group will
1740 # be empty during the pull
1743 # be empty during the pull
1741 self.manifest.addgroup(source, revmap, trp)
1744 self.manifest.addgroup(source, revmap, trp)
1742 self.ui.progress(_('manifests'), None)
1745 self.ui.progress(_('manifests'), None)
1743
1746
1744 needfiles = {}
1747 needfiles = {}
1745 if self.ui.configbool('server', 'validate', default=False):
1748 if self.ui.configbool('server', 'validate', default=False):
1746 # validate incoming csets have their manifests
1749 # validate incoming csets have their manifests
1747 for cset in xrange(clstart, clend):
1750 for cset in xrange(clstart, clend):
1748 mfest = self.changelog.read(self.changelog.node(cset))[0]
1751 mfest = self.changelog.read(self.changelog.node(cset))[0]
1749 mfest = self.manifest.readdelta(mfest)
1752 mfest = self.manifest.readdelta(mfest)
1750 # store file nodes we must see
1753 # store file nodes we must see
1751 for f, n in mfest.iteritems():
1754 for f, n in mfest.iteritems():
1752 needfiles.setdefault(f, set()).add(n)
1755 needfiles.setdefault(f, set()).add(n)
1753
1756
1754 # process the files
1757 # process the files
1755 self.ui.status(_("adding file changes\n"))
1758 self.ui.status(_("adding file changes\n"))
1756 pr.step = 'files'
1759 pr.step = 'files'
1757 pr.count = 1
1760 pr.count = 1
1758 pr.total = efiles
1761 pr.total = efiles
1759 source.callback = None
1762 source.callback = None
1760
1763
1761 while 1:
1764 while 1:
1762 f = source.chunk()
1765 f = source.chunk()
1763 if not f:
1766 if not f:
1764 break
1767 break
1765 self.ui.debug("adding %s revisions\n" % f)
1768 self.ui.debug("adding %s revisions\n" % f)
1766 pr()
1769 pr()
1767 fl = self.file(f)
1770 fl = self.file(f)
1768 o = len(fl)
1771 o = len(fl)
1769 if fl.addgroup(source, revmap, trp) is None:
1772 if fl.addgroup(source, revmap, trp) is None:
1770 raise util.Abort(_("received file revlog group is empty"))
1773 raise util.Abort(_("received file revlog group is empty"))
1771 revisions += len(fl) - o
1774 revisions += len(fl) - o
1772 files += 1
1775 files += 1
1773 if f in needfiles:
1776 if f in needfiles:
1774 needs = needfiles[f]
1777 needs = needfiles[f]
1775 for new in xrange(o, len(fl)):
1778 for new in xrange(o, len(fl)):
1776 n = fl.node(new)
1779 n = fl.node(new)
1777 if n in needs:
1780 if n in needs:
1778 needs.remove(n)
1781 needs.remove(n)
1779 if not needs:
1782 if not needs:
1780 del needfiles[f]
1783 del needfiles[f]
1781 self.ui.progress(_('files'), None)
1784 self.ui.progress(_('files'), None)
1782
1785
1783 for f, needs in needfiles.iteritems():
1786 for f, needs in needfiles.iteritems():
1784 fl = self.file(f)
1787 fl = self.file(f)
1785 for n in needs:
1788 for n in needs:
1786 try:
1789 try:
1787 fl.rev(n)
1790 fl.rev(n)
1788 except error.LookupError:
1791 except error.LookupError:
1789 raise util.Abort(
1792 raise util.Abort(
1790 _('missing file data for %s:%s - run hg verify') %
1793 _('missing file data for %s:%s - run hg verify') %
1791 (f, hex(n)))
1794 (f, hex(n)))
1792
1795
            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))
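            # a typical resulting status line reads, for instance:
            #   added 3 changesets with 5 changes to 4 files (+1 heads)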

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)
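            # (illustrative) a repository can veto incoming changes at this
            # point with an hgrc hook; a non-zero exit status rolls the
            # transaction back, e.g.:
            #
            #   [hooks]
            #   pretxnchangegroup.check = /path/to/check-script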

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
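        # (worked example) a pull that adds one new head sees, say,
        # oldheads == 1 and newheads == 2, returning 2 - 1 + 1 == 2; one
        # that removes a head (oldheads == 2, newheads == 1) returns
        # 1 - 2 - 1 == -2.  either way the result is never zero.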


    def stream_in(self, remote, requirements):
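        # the stream_out wire format, as consumed below: a status line
        # ("0" on success, "1" if the operation is forbidden, "2" if the
        # remote lock failed), then a "<total_files> <total_bytes>" line,
        # then for each file a "<name>\0<size>" line followed by exactly
        # <size> bytes of raw store data.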
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
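        # (illustrative) "hg clone --uncompressed URL" is the usual way a
        # caller ends up here with stream=True; a plain "hg clone" takes
        # the pull path at the bottom of this method.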

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

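        # (illustrative) a server whose repository needs only revlogv1
        # advertises the bare "stream" capability; one with newer format
        # requirements lists them instead, along the lines of
        # "streamreqs=revlogv1,parentdelta", and the client below streams
        # only if it supports every listed requirement.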
        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)
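    # (illustrative) bookmarks travel over the wire through this pushkey
    # layer: a peer enumerates them with listkeys('bookmarks') and moves
    # one with pushkey('bookmarks', name, oldnode, newnode); the
    # 'hg debugpushkey' command pokes at the same interface by hand.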

    def debugwireargs(self, one, two, three=None, four=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s" % (one, two, three, four)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
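# (illustrative) the transaction machinery uses the returned closure as its
# post-close callback, e.g. aftertrans([('journal', 'undo')]) renames the
# write-ahead journal to the undo file once the transaction completes;
# capturing only plain tuples keeps the closure from forming a reference
# cycle with the repository object.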
1939
1942
1940 def instance(ui, path, create):
1943 def instance(ui, path, create):
1941 return localrepository(ui, util.drop_scheme('file', path), create)
1944 return localrepository(ui, util.drop_scheme('file', path), create)
1942
1945
1943 def islocal(path):
1946 def islocal(path):
1944 return True
1947 return True
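# (illustrative) callers normally reach this module through the hg.repository
# dispatcher rather than by invoking instance() directly, e.g.:
#
#   from mercurial import hg, ui
#   repo = hg.repository(ui.ui(), '.')  # resolves to localrepository here
#   print len(repo)                     # number of changesets in the repo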