merge with crew
Matt Mackall
r12297:a424fa60 merge default
@@ -1,1863 +1,1888 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared'))
25
26
26 def __init__(self, baseui, path=None, create=0):
27 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
28 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
29 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
30 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
31 self.origroot = path
31 self.auditor = util.path_auditor(self.root, self._checknested)
32 self.auditor = util.path_auditor(self.root, self._checknested)
32 self.opener = util.opener(self.path)
33 self.opener = util.opener(self.path)
33 self.wopener = util.opener(self.root)
34 self.wopener = util.opener(self.root)
34 self.baseui = baseui
35 self.baseui = baseui
35 self.ui = baseui.copy()
36 self.ui = baseui.copy()
36
37
37 try:
38 try:
38 self.ui.readconfig(self.join("hgrc"), self.root)
39 self.ui.readconfig(self.join("hgrc"), self.root)
39 extensions.loadall(self.ui)
40 extensions.loadall(self.ui)
40 except IOError:
41 except IOError:
41 pass
42 pass
42
43
43 if not os.path.isdir(self.path):
44 if not os.path.isdir(self.path):
44 if create:
45 if create:
45 if not os.path.exists(path):
46 if not os.path.exists(path):
46 util.makedirs(path)
47 util.makedirs(path)
47 os.mkdir(self.path)
48 os.mkdir(self.path)
48 requirements = ["revlogv1"]
49 requirements = ["revlogv1"]
49 if self.ui.configbool('format', 'usestore', True):
50 if self.ui.configbool('format', 'usestore', True):
50 os.mkdir(os.path.join(self.path, "store"))
51 os.mkdir(os.path.join(self.path, "store"))
51 requirements.append("store")
52 requirements.append("store")
52 if self.ui.configbool('format', 'usefncache', True):
53 if self.ui.configbool('format', 'usefncache', True):
53 requirements.append("fncache")
54 requirements.append("fncache")
54 # create an invalid changelog
55 # create an invalid changelog
55 self.opener("00changelog.i", "a").write(
56 self.opener("00changelog.i", "a").write(
56 '\0\0\0\2' # represents revlogv2
57 '\0\0\0\2' # represents revlogv2
57 ' dummy changelog to prevent using the old repo layout'
58 ' dummy changelog to prevent using the old repo layout'
58 )
59 )
59 if self.ui.configbool('format', 'parentdelta', False):
60 if self.ui.configbool('format', 'parentdelta', False):
60 requirements.append("parentdelta")
61 requirements.append("parentdelta")
61 reqfile = self.opener("requires", "w")
62 for r in requirements:
63 reqfile.write("%s\n" % r)
64 reqfile.close()
65 else:
62 else:
66 raise error.RepoError(_("repository %s not found") % path)
63 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
64 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
65 raise error.RepoError(_("repository %s already exists") % path)
69 else:
66 else:
70 # find requirements
67 # find requirements
71 requirements = set()
68 requirements = set()
72 try:
69 try:
73 requirements = set(self.opener("requires").read().splitlines())
70 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
71 except IOError, inst:
75 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
76 raise
73 raise
77 for r in requirements - self.supported:
74 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
75 raise error.RepoError(_("requirement '%s' not supported") % r)
79
76
80 self.sharedpath = self.path
77 self.sharedpath = self.path
81 try:
78 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
79 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
80 if not os.path.exists(s):
84 raise error.RepoError(
81 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
83 self.sharedpath = s
87 except IOError, inst:
84 except IOError, inst:
88 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
89 raise
86 raise
90
87
91 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
89 self.spath = self.store.path
93 self.sopener = self.store.opener
90 self.sopener = self.store.opener
94 self.sjoin = self.store.join
91 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
92 self.opener.createmode = self.store.createmode
96 self.sopener.options = {}
93 self._applyrequirements(requirements)
97 if 'parentdelta' in requirements:
94 if create:
98 self.sopener.options['parentdelta'] = 1
95 self._writerequirements()
99
96
100 # These two define the set of tags for this repository. _tags
97 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
98 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
99 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
100 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
101 # constitute the in-memory cache of tags.
105 self._tags = None
102 self._tags = None
106 self._tagtypes = None
103 self._tagtypes = None
107
104
108 self._branchcache = None # in UTF-8
105 self._branchcache = None # in UTF-8
109 self._branchcachetip = None
106 self._branchcachetip = None
110 self.nodetagscache = None
107 self.nodetagscache = None
111 self.filterpats = {}
108 self.filterpats = {}
112 self._datafilters = {}
109 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
110 self._transref = self._lockref = self._wlockref = None
114
111
112 def _applyrequirements(self, requirements):
113 self.requirements = requirements
114 self.sopener.options = {}
115 if 'parentdelta' in requirements:
116 self.sopener.options['parentdelta'] = 1
117
118 def _writerequirements(self):
119 reqfile = self.opener("requires", "w")
120 for r in self.requirements:
121 reqfile.write("%s\n" % r)
122 reqfile.close()
123
115 def _checknested(self, path):
124 def _checknested(self, path):
116 """Determine if path is a legal nested repository."""
125 """Determine if path is a legal nested repository."""
117 if not path.startswith(self.root):
126 if not path.startswith(self.root):
118 return False
127 return False
119 subpath = path[len(self.root) + 1:]
128 subpath = path[len(self.root) + 1:]
120
129
121 # XXX: Checking against the current working copy is wrong in
130 # XXX: Checking against the current working copy is wrong in
122 # the sense that it can reject things like
131 # the sense that it can reject things like
123 #
132 #
124 # $ hg cat -r 10 sub/x.txt
133 # $ hg cat -r 10 sub/x.txt
125 #
134 #
126 # if sub/ is no longer a subrepository in the working copy
135 # if sub/ is no longer a subrepository in the working copy
127 # parent revision.
136 # parent revision.
128 #
137 #
129 # However, it can of course also allow things that would have
138 # However, it can of course also allow things that would have
130 # been rejected before, such as the above cat command if sub/
139 # been rejected before, such as the above cat command if sub/
131 # is a subrepository now, but was a normal directory before.
140 # is a subrepository now, but was a normal directory before.
132 # The old path auditor would have rejected by mistake since it
141 # The old path auditor would have rejected by mistake since it
133 # panics when it sees sub/.hg/.
142 # panics when it sees sub/.hg/.
134 #
143 #
135 # All in all, checking against the working copy seems sensible
144 # All in all, checking against the working copy seems sensible
136 # since we want to prevent access to nested repositories on
145 # since we want to prevent access to nested repositories on
137 # the filesystem *now*.
146 # the filesystem *now*.
138 ctx = self[None]
147 ctx = self[None]
139 parts = util.splitpath(subpath)
148 parts = util.splitpath(subpath)
140 while parts:
149 while parts:
141 prefix = os.sep.join(parts)
150 prefix = os.sep.join(parts)
142 if prefix in ctx.substate:
151 if prefix in ctx.substate:
143 if prefix == subpath:
152 if prefix == subpath:
144 return True
153 return True
145 else:
154 else:
146 sub = ctx.sub(prefix)
155 sub = ctx.sub(prefix)
147 return sub.checknested(subpath[len(prefix) + 1:])
156 return sub.checknested(subpath[len(prefix) + 1:])
148 else:
157 else:
149 parts.pop()
158 parts.pop()
150 return False
159 return False
151
160
152
161
153 @propertycache
162 @propertycache
154 def changelog(self):
163 def changelog(self):
155 c = changelog.changelog(self.sopener)
164 c = changelog.changelog(self.sopener)
156 if 'HG_PENDING' in os.environ:
165 if 'HG_PENDING' in os.environ:
157 p = os.environ['HG_PENDING']
166 p = os.environ['HG_PENDING']
158 if p.startswith(self.root):
167 if p.startswith(self.root):
159 c.readpending('00changelog.i.a')
168 c.readpending('00changelog.i.a')
160 self.sopener.options['defversion'] = c.version
169 self.sopener.options['defversion'] = c.version
161 return c
170 return c
162
171
163 @propertycache
172 @propertycache
164 def manifest(self):
173 def manifest(self):
165 return manifest.manifest(self.sopener)
174 return manifest.manifest(self.sopener)
166
175
167 @propertycache
176 @propertycache
168 def dirstate(self):
177 def dirstate(self):
169 return dirstate.dirstate(self.opener, self.ui, self.root)
178 return dirstate.dirstate(self.opener, self.ui, self.root)
170
179
171 def __getitem__(self, changeid):
180 def __getitem__(self, changeid):
172 if changeid is None:
181 if changeid is None:
173 return context.workingctx(self)
182 return context.workingctx(self)
174 return context.changectx(self, changeid)
183 return context.changectx(self, changeid)
175
184
176 def __contains__(self, changeid):
185 def __contains__(self, changeid):
177 try:
186 try:
178 return bool(self.lookup(changeid))
187 return bool(self.lookup(changeid))
179 except error.RepoLookupError:
188 except error.RepoLookupError:
180 return False
189 return False
181
190
182 def __nonzero__(self):
191 def __nonzero__(self):
183 return True
192 return True
184
193
185 def __len__(self):
194 def __len__(self):
186 return len(self.changelog)
195 return len(self.changelog)
187
196
188 def __iter__(self):
197 def __iter__(self):
189 for i in xrange(len(self)):
198 for i in xrange(len(self)):
190 yield i
199 yield i
191
200
192 def url(self):
201 def url(self):
193 return 'file:' + self.root
202 return 'file:' + self.root
194
203
195 def hook(self, name, throw=False, **args):
204 def hook(self, name, throw=False, **args):
196 return hook.hook(self.ui, self, name, throw, **args)
205 return hook.hook(self.ui, self, name, throw, **args)
197
206
198 tag_disallowed = ':\r\n'
207 tag_disallowed = ':\r\n'
199
208
200 def _tag(self, names, node, message, local, user, date, extra={}):
209 def _tag(self, names, node, message, local, user, date, extra={}):
201 if isinstance(names, str):
210 if isinstance(names, str):
202 allchars = names
211 allchars = names
203 names = (names,)
212 names = (names,)
204 else:
213 else:
205 allchars = ''.join(names)
214 allchars = ''.join(names)
206 for c in self.tag_disallowed:
215 for c in self.tag_disallowed:
207 if c in allchars:
216 if c in allchars:
208 raise util.Abort(_('%r cannot be used in a tag name') % c)
217 raise util.Abort(_('%r cannot be used in a tag name') % c)
209
218
210 branches = self.branchmap()
219 branches = self.branchmap()
211 for name in names:
220 for name in names:
212 self.hook('pretag', throw=True, node=hex(node), tag=name,
221 self.hook('pretag', throw=True, node=hex(node), tag=name,
213 local=local)
222 local=local)
214 if name in branches:
223 if name in branches:
215 self.ui.warn(_("warning: tag %s conflicts with existing"
224 self.ui.warn(_("warning: tag %s conflicts with existing"
216 " branch name\n") % name)
225 " branch name\n") % name)
217
226
218 def writetags(fp, names, munge, prevtags):
227 def writetags(fp, names, munge, prevtags):
219 fp.seek(0, 2)
228 fp.seek(0, 2)
220 if prevtags and prevtags[-1] != '\n':
229 if prevtags and prevtags[-1] != '\n':
221 fp.write('\n')
230 fp.write('\n')
222 for name in names:
231 for name in names:
223 m = munge and munge(name) or name
232 m = munge and munge(name) or name
224 if self._tagtypes and name in self._tagtypes:
233 if self._tagtypes and name in self._tagtypes:
225 old = self._tags.get(name, nullid)
234 old = self._tags.get(name, nullid)
226 fp.write('%s %s\n' % (hex(old), m))
235 fp.write('%s %s\n' % (hex(old), m))
227 fp.write('%s %s\n' % (hex(node), m))
236 fp.write('%s %s\n' % (hex(node), m))
228 fp.close()
237 fp.close()
229
238
230 prevtags = ''
239 prevtags = ''
231 if local:
240 if local:
232 try:
241 try:
233 fp = self.opener('localtags', 'r+')
242 fp = self.opener('localtags', 'r+')
234 except IOError:
243 except IOError:
235 fp = self.opener('localtags', 'a')
244 fp = self.opener('localtags', 'a')
236 else:
245 else:
237 prevtags = fp.read()
246 prevtags = fp.read()
238
247
239 # local tags are stored in the current charset
248 # local tags are stored in the current charset
240 writetags(fp, names, None, prevtags)
249 writetags(fp, names, None, prevtags)
241 for name in names:
250 for name in names:
242 self.hook('tag', node=hex(node), tag=name, local=local)
251 self.hook('tag', node=hex(node), tag=name, local=local)
243 return
252 return
244
253
245 try:
254 try:
246 fp = self.wfile('.hgtags', 'rb+')
255 fp = self.wfile('.hgtags', 'rb+')
247 except IOError:
256 except IOError:
248 fp = self.wfile('.hgtags', 'ab')
257 fp = self.wfile('.hgtags', 'ab')
249 else:
258 else:
250 prevtags = fp.read()
259 prevtags = fp.read()
251
260
252 # committed tags are stored in UTF-8
261 # committed tags are stored in UTF-8
253 writetags(fp, names, encoding.fromlocal, prevtags)
262 writetags(fp, names, encoding.fromlocal, prevtags)
254
263
255 if '.hgtags' not in self.dirstate:
264 if '.hgtags' not in self.dirstate:
256 self[None].add(['.hgtags'])
265 self[None].add(['.hgtags'])
257
266
258 m = matchmod.exact(self.root, '', ['.hgtags'])
267 m = matchmod.exact(self.root, '', ['.hgtags'])
259 tagnode = self.commit(message, user, date, extra=extra, match=m)
268 tagnode = self.commit(message, user, date, extra=extra, match=m)
260
269
261 for name in names:
270 for name in names:
262 self.hook('tag', node=hex(node), tag=name, local=local)
271 self.hook('tag', node=hex(node), tag=name, local=local)
263
272
264 return tagnode
273 return tagnode
265
274
266 def tag(self, names, node, message, local, user, date):
275 def tag(self, names, node, message, local, user, date):
267 '''tag a revision with one or more symbolic names.
276 '''tag a revision with one or more symbolic names.
268
277
269 names is a list of strings or, when adding a single tag, names may be a
278 names is a list of strings or, when adding a single tag, names may be a
270 string.
279 string.
271
280
272 if local is True, the tags are stored in a per-repository file.
281 if local is True, the tags are stored in a per-repository file.
273 otherwise, they are stored in the .hgtags file, and a new
282 otherwise, they are stored in the .hgtags file, and a new
274 changeset is committed with the change.
283 changeset is committed with the change.
275
284
276 keyword arguments:
285 keyword arguments:
277
286
278 local: whether to store tags in non-version-controlled file
287 local: whether to store tags in non-version-controlled file
279 (default False)
288 (default False)
280
289
281 message: commit message to use if committing
290 message: commit message to use if committing
282
291
283 user: name of user to use if committing
292 user: name of user to use if committing
284
293
285 date: date tuple to use if committing'''
294 date: date tuple to use if committing'''
286
295
287 for x in self.status()[:5]:
296 for x in self.status()[:5]:
288 if '.hgtags' in x:
297 if '.hgtags' in x:
289 raise util.Abort(_('working copy of .hgtags is changed '
298 raise util.Abort(_('working copy of .hgtags is changed '
290 '(please commit .hgtags manually)'))
299 '(please commit .hgtags manually)'))
291
300
292 self.tags() # instantiate the cache
301 self.tags() # instantiate the cache
293 self._tag(names, node, message, local, user, date)
302 self._tag(names, node, message, local, user, date)
294
303
295 def tags(self):
304 def tags(self):
296 '''return a mapping of tag to node'''
305 '''return a mapping of tag to node'''
297 if self._tags is None:
306 if self._tags is None:
298 (self._tags, self._tagtypes) = self._findtags()
307 (self._tags, self._tagtypes) = self._findtags()
299
308
300 return self._tags
309 return self._tags
301
310
302 def _findtags(self):
311 def _findtags(self):
303 '''Do the hard work of finding tags. Return a pair of dicts
312 '''Do the hard work of finding tags. Return a pair of dicts
304 (tags, tagtypes) where tags maps tag name to node, and tagtypes
313 (tags, tagtypes) where tags maps tag name to node, and tagtypes
305 maps tag name to a string like \'global\' or \'local\'.
314 maps tag name to a string like \'global\' or \'local\'.
306 Subclasses or extensions are free to add their own tags, but
315 Subclasses or extensions are free to add their own tags, but
307 should be aware that the returned dicts will be retained for the
316 should be aware that the returned dicts will be retained for the
308 duration of the localrepo object.'''
317 duration of the localrepo object.'''
309
318
310 # XXX what tagtype should subclasses/extensions use? Currently
319 # XXX what tagtype should subclasses/extensions use? Currently
311 # mq and bookmarks add tags, but do not set the tagtype at all.
320 # mq and bookmarks add tags, but do not set the tagtype at all.
312 # Should each extension invent its own tag type? Should there
321 # Should each extension invent its own tag type? Should there
313 # be one tagtype for all such "virtual" tags? Or is the status
322 # be one tagtype for all such "virtual" tags? Or is the status
314 # quo fine?
323 # quo fine?
315
324
316 alltags = {} # map tag name to (node, hist)
325 alltags = {} # map tag name to (node, hist)
317 tagtypes = {}
326 tagtypes = {}
318
327
319 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
328 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
320 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
329 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
321
330
322 # Build the return dicts. Have to re-encode tag names because
331 # Build the return dicts. Have to re-encode tag names because
323 # the tags module always uses UTF-8 (in order not to lose info
332 # the tags module always uses UTF-8 (in order not to lose info
324 # writing to the cache), but the rest of Mercurial wants them in
333 # writing to the cache), but the rest of Mercurial wants them in
325 # local encoding.
334 # local encoding.
326 tags = {}
335 tags = {}
327 for (name, (node, hist)) in alltags.iteritems():
336 for (name, (node, hist)) in alltags.iteritems():
328 if node != nullid:
337 if node != nullid:
329 tags[encoding.tolocal(name)] = node
338 tags[encoding.tolocal(name)] = node
330 tags['tip'] = self.changelog.tip()
339 tags['tip'] = self.changelog.tip()
331 tagtypes = dict([(encoding.tolocal(name), value)
340 tagtypes = dict([(encoding.tolocal(name), value)
332 for (name, value) in tagtypes.iteritems()])
341 for (name, value) in tagtypes.iteritems()])
333 return (tags, tagtypes)
342 return (tags, tagtypes)
334
343
335 def tagtype(self, tagname):
344 def tagtype(self, tagname):
336 '''
345 '''
337 return the type of the given tag. result can be:
346 return the type of the given tag. result can be:
338
347
339 'local' : a local tag
348 'local' : a local tag
340 'global' : a global tag
349 'global' : a global tag
341 None : tag does not exist
350 None : tag does not exist
342 '''
351 '''
343
352
344 self.tags()
353 self.tags()
345
354
346 return self._tagtypes.get(tagname)
355 return self._tagtypes.get(tagname)
347
356
348 def tagslist(self):
357 def tagslist(self):
349 '''return a list of tags ordered by revision'''
358 '''return a list of tags ordered by revision'''
350 l = []
359 l = []
351 for t, n in self.tags().iteritems():
360 for t, n in self.tags().iteritems():
352 try:
361 try:
353 r = self.changelog.rev(n)
362 r = self.changelog.rev(n)
354 except:
363 except:
355 r = -2 # sort to the beginning of the list if unknown
364 r = -2 # sort to the beginning of the list if unknown
356 l.append((r, t, n))
365 l.append((r, t, n))
357 return [(t, n) for r, t, n in sorted(l)]
366 return [(t, n) for r, t, n in sorted(l)]
358
367
359 def nodetags(self, node):
368 def nodetags(self, node):
360 '''return the tags associated with a node'''
369 '''return the tags associated with a node'''
361 if not self.nodetagscache:
370 if not self.nodetagscache:
362 self.nodetagscache = {}
371 self.nodetagscache = {}
363 for t, n in self.tags().iteritems():
372 for t, n in self.tags().iteritems():
364 self.nodetagscache.setdefault(n, []).append(t)
373 self.nodetagscache.setdefault(n, []).append(t)
365 for tags in self.nodetagscache.itervalues():
374 for tags in self.nodetagscache.itervalues():
366 tags.sort()
375 tags.sort()
367 return self.nodetagscache.get(node, [])
376 return self.nodetagscache.get(node, [])
368
377
369 def _branchtags(self, partial, lrev):
378 def _branchtags(self, partial, lrev):
370 # TODO: rename this function?
379 # TODO: rename this function?
371 tiprev = len(self) - 1
380 tiprev = len(self) - 1
372 if lrev != tiprev:
381 if lrev != tiprev:
373 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
382 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
374 self._updatebranchcache(partial, ctxgen)
383 self._updatebranchcache(partial, ctxgen)
375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376
385
377 return partial
386 return partial
378
387
379 def updatebranchcache(self):
388 def updatebranchcache(self):
380 tip = self.changelog.tip()
389 tip = self.changelog.tip()
381 if self._branchcache is not None and self._branchcachetip == tip:
390 if self._branchcache is not None and self._branchcachetip == tip:
382 return self._branchcache
391 return self._branchcache
383
392
384 oldtip = self._branchcachetip
393 oldtip = self._branchcachetip
385 self._branchcachetip = tip
394 self._branchcachetip = tip
386 if oldtip is None or oldtip not in self.changelog.nodemap:
395 if oldtip is None or oldtip not in self.changelog.nodemap:
387 partial, last, lrev = self._readbranchcache()
396 partial, last, lrev = self._readbranchcache()
388 else:
397 else:
389 lrev = self.changelog.rev(oldtip)
398 lrev = self.changelog.rev(oldtip)
390 partial = self._branchcache
399 partial = self._branchcache
391
400
392 self._branchtags(partial, lrev)
401 self._branchtags(partial, lrev)
393 # this private cache holds all heads (not just tips)
402 # this private cache holds all heads (not just tips)
394 self._branchcache = partial
403 self._branchcache = partial
395
404
396 def branchmap(self):
405 def branchmap(self):
397 '''returns a dictionary {branch: [branchheads]}'''
406 '''returns a dictionary {branch: [branchheads]}'''
398 self.updatebranchcache()
407 self.updatebranchcache()
399 return self._branchcache
408 return self._branchcache
400
409
401 def branchtags(self):
410 def branchtags(self):
402 '''return a dict where branch names map to the tipmost head of
411 '''return a dict where branch names map to the tipmost head of
403 the branch, open heads come before closed'''
412 the branch, open heads come before closed'''
404 bt = {}
413 bt = {}
405 for bn, heads in self.branchmap().iteritems():
414 for bn, heads in self.branchmap().iteritems():
406 tip = heads[-1]
415 tip = heads[-1]
407 for h in reversed(heads):
416 for h in reversed(heads):
408 if 'close' not in self.changelog.read(h)[5]:
417 if 'close' not in self.changelog.read(h)[5]:
409 tip = h
418 tip = h
410 break
419 break
411 bt[bn] = tip
420 bt[bn] = tip
412 return bt
421 return bt
413
422
414
423
415 def _readbranchcache(self):
424 def _readbranchcache(self):
416 partial = {}
425 partial = {}
417 try:
426 try:
418 f = self.opener("branchheads.cache")
427 f = self.opener("branchheads.cache")
419 lines = f.read().split('\n')
428 lines = f.read().split('\n')
420 f.close()
429 f.close()
421 except (IOError, OSError):
430 except (IOError, OSError):
422 return {}, nullid, nullrev
431 return {}, nullid, nullrev
423
432
424 try:
433 try:
425 last, lrev = lines.pop(0).split(" ", 1)
434 last, lrev = lines.pop(0).split(" ", 1)
426 last, lrev = bin(last), int(lrev)
435 last, lrev = bin(last), int(lrev)
427 if lrev >= len(self) or self[lrev].node() != last:
436 if lrev >= len(self) or self[lrev].node() != last:
428 # invalidate the cache
437 # invalidate the cache
429 raise ValueError('invalidating branch cache (tip differs)')
438 raise ValueError('invalidating branch cache (tip differs)')
430 for l in lines:
439 for l in lines:
431 if not l:
440 if not l:
432 continue
441 continue
433 node, label = l.split(" ", 1)
442 node, label = l.split(" ", 1)
434 partial.setdefault(label.strip(), []).append(bin(node))
443 partial.setdefault(label.strip(), []).append(bin(node))
435 except KeyboardInterrupt:
444 except KeyboardInterrupt:
436 raise
445 raise
437 except Exception, inst:
446 except Exception, inst:
438 if self.ui.debugflag:
447 if self.ui.debugflag:
439 self.ui.warn(str(inst), '\n')
448 self.ui.warn(str(inst), '\n')
440 partial, last, lrev = {}, nullid, nullrev
449 partial, last, lrev = {}, nullid, nullrev
441 return partial, last, lrev
450 return partial, last, lrev
442
451
443 def _writebranchcache(self, branches, tip, tiprev):
452 def _writebranchcache(self, branches, tip, tiprev):
444 try:
453 try:
445 f = self.opener("branchheads.cache", "w", atomictemp=True)
454 f = self.opener("branchheads.cache", "w", atomictemp=True)
446 f.write("%s %s\n" % (hex(tip), tiprev))
455 f.write("%s %s\n" % (hex(tip), tiprev))
447 for label, nodes in branches.iteritems():
456 for label, nodes in branches.iteritems():
448 for node in nodes:
457 for node in nodes:
449 f.write("%s %s\n" % (hex(node), label))
458 f.write("%s %s\n" % (hex(node), label))
450 f.rename()
459 f.rename()
451 except (IOError, OSError):
460 except (IOError, OSError):
452 pass
461 pass
453
462
454 def _updatebranchcache(self, partial, ctxgen):
463 def _updatebranchcache(self, partial, ctxgen):
455 # collect new branch entries
464 # collect new branch entries
456 newbranches = {}
465 newbranches = {}
457 for c in ctxgen:
466 for c in ctxgen:
458 newbranches.setdefault(c.branch(), []).append(c.node())
467 newbranches.setdefault(c.branch(), []).append(c.node())
459 # if older branchheads are reachable from new ones, they aren't
468 # if older branchheads are reachable from new ones, they aren't
460 # really branchheads. Note checking parents is insufficient:
469 # really branchheads. Note checking parents is insufficient:
461 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
470 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
462 for branch, newnodes in newbranches.iteritems():
471 for branch, newnodes in newbranches.iteritems():
463 bheads = partial.setdefault(branch, [])
472 bheads = partial.setdefault(branch, [])
464 bheads.extend(newnodes)
473 bheads.extend(newnodes)
465 if len(bheads) <= 1:
474 if len(bheads) <= 1:
466 continue
475 continue
467 # starting from tip means fewer passes over reachable
476 # starting from tip means fewer passes over reachable
468 while newnodes:
477 while newnodes:
469 latest = newnodes.pop()
478 latest = newnodes.pop()
470 if latest not in bheads:
479 if latest not in bheads:
471 continue
480 continue
472 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
481 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
473 reachable = self.changelog.reachable(latest, minbhrev)
482 reachable = self.changelog.reachable(latest, minbhrev)
474 reachable.remove(latest)
483 reachable.remove(latest)
475 bheads = [b for b in bheads if b not in reachable]
484 bheads = [b for b in bheads if b not in reachable]
476 partial[branch] = bheads
485 partial[branch] = bheads
477
486
478 def lookup(self, key):
487 def lookup(self, key):
479 if isinstance(key, int):
488 if isinstance(key, int):
480 return self.changelog.node(key)
489 return self.changelog.node(key)
481 elif key == '.':
490 elif key == '.':
482 return self.dirstate.parents()[0]
491 return self.dirstate.parents()[0]
483 elif key == 'null':
492 elif key == 'null':
484 return nullid
493 return nullid
485 elif key == 'tip':
494 elif key == 'tip':
486 return self.changelog.tip()
495 return self.changelog.tip()
487 n = self.changelog._match(key)
496 n = self.changelog._match(key)
488 if n:
497 if n:
489 return n
498 return n
490 if key in self.tags():
499 if key in self.tags():
491 return self.tags()[key]
500 return self.tags()[key]
492 if key in self.branchtags():
501 if key in self.branchtags():
493 return self.branchtags()[key]
502 return self.branchtags()[key]
494 n = self.changelog._partialmatch(key)
503 n = self.changelog._partialmatch(key)
495 if n:
504 if n:
496 return n
505 return n
497
506
498 # can't find key, check if it might have come from damaged dirstate
507 # can't find key, check if it might have come from damaged dirstate
499 if key in self.dirstate.parents():
508 if key in self.dirstate.parents():
500 raise error.Abort(_("working directory has unknown parent '%s'!")
509 raise error.Abort(_("working directory has unknown parent '%s'!")
501 % short(key))
510 % short(key))
502 try:
511 try:
503 if len(key) == 20:
512 if len(key) == 20:
504 key = hex(key)
513 key = hex(key)
505 except:
514 except:
506 pass
515 pass
507 raise error.RepoLookupError(_("unknown revision '%s'") % key)
516 raise error.RepoLookupError(_("unknown revision '%s'") % key)
508
517
509 def lookupbranch(self, key, remote=None):
518 def lookupbranch(self, key, remote=None):
510 repo = remote or self
519 repo = remote or self
511 if key in repo.branchmap():
520 if key in repo.branchmap():
512 return key
521 return key
513
522
514 repo = (remote and remote.local()) and remote or self
523 repo = (remote and remote.local()) and remote or self
515 return repo[key].branch()
524 return repo[key].branch()
516
525
517 def local(self):
526 def local(self):
518 return True
527 return True
519
528
520 def join(self, f):
529 def join(self, f):
521 return os.path.join(self.path, f)
530 return os.path.join(self.path, f)
522
531
523 def wjoin(self, f):
532 def wjoin(self, f):
524 return os.path.join(self.root, f)
533 return os.path.join(self.root, f)
525
534
526 def file(self, f):
535 def file(self, f):
527 if f[0] == '/':
536 if f[0] == '/':
528 f = f[1:]
537 f = f[1:]
529 return filelog.filelog(self.sopener, f)
538 return filelog.filelog(self.sopener, f)
530
539
531 def changectx(self, changeid):
540 def changectx(self, changeid):
532 return self[changeid]
541 return self[changeid]
533
542
534 def parents(self, changeid=None):
543 def parents(self, changeid=None):
535 '''get list of changectxs for parents of changeid'''
544 '''get list of changectxs for parents of changeid'''
536 return self[changeid].parents()
545 return self[changeid].parents()
537
546
538 def filectx(self, path, changeid=None, fileid=None):
547 def filectx(self, path, changeid=None, fileid=None):
539 """changeid can be a changeset revision, node, or tag.
548 """changeid can be a changeset revision, node, or tag.
540 fileid can be a file revision or node."""
549 fileid can be a file revision or node."""
541 return context.filectx(self, path, changeid, fileid)
550 return context.filectx(self, path, changeid, fileid)
542
551
543 def getcwd(self):
552 def getcwd(self):
544 return self.dirstate.getcwd()
553 return self.dirstate.getcwd()
545
554
546 def pathto(self, f, cwd=None):
555 def pathto(self, f, cwd=None):
547 return self.dirstate.pathto(f, cwd)
556 return self.dirstate.pathto(f, cwd)
548
557
549 def wfile(self, f, mode='r'):
558 def wfile(self, f, mode='r'):
550 return self.wopener(f, mode)
559 return self.wopener(f, mode)
551
560
552 def _link(self, f):
561 def _link(self, f):
553 return os.path.islink(self.wjoin(f))
562 return os.path.islink(self.wjoin(f))
554
563
555 def _loadfilter(self, filter):
564 def _loadfilter(self, filter):
556 if filter not in self.filterpats:
565 if filter not in self.filterpats:
557 l = []
566 l = []
558 for pat, cmd in self.ui.configitems(filter):
567 for pat, cmd in self.ui.configitems(filter):
559 if cmd == '!':
568 if cmd == '!':
560 continue
569 continue
561 mf = matchmod.match(self.root, '', [pat])
570 mf = matchmod.match(self.root, '', [pat])
562 fn = None
571 fn = None
563 params = cmd
572 params = cmd
564 for name, filterfn in self._datafilters.iteritems():
573 for name, filterfn in self._datafilters.iteritems():
565 if cmd.startswith(name):
574 if cmd.startswith(name):
566 fn = filterfn
575 fn = filterfn
567 params = cmd[len(name):].lstrip()
576 params = cmd[len(name):].lstrip()
568 break
577 break
569 if not fn:
578 if not fn:
570 fn = lambda s, c, **kwargs: util.filter(s, c)
579 fn = lambda s, c, **kwargs: util.filter(s, c)
571 # Wrap old filters not supporting keyword arguments
580 # Wrap old filters not supporting keyword arguments
572 if not inspect.getargspec(fn)[2]:
581 if not inspect.getargspec(fn)[2]:
573 oldfn = fn
582 oldfn = fn
574 fn = lambda s, c, **kwargs: oldfn(s, c)
583 fn = lambda s, c, **kwargs: oldfn(s, c)
575 l.append((mf, fn, params))
584 l.append((mf, fn, params))
576 self.filterpats[filter] = l
585 self.filterpats[filter] = l
577
586
578 def _filter(self, filter, filename, data):
587 def _filter(self, filter, filename, data):
579 self._loadfilter(filter)
588 self._loadfilter(filter)
580
589
581 for mf, fn, cmd in self.filterpats[filter]:
590 for mf, fn, cmd in self.filterpats[filter]:
582 if mf(filename):
591 if mf(filename):
583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
592 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
593 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
585 break
594 break
586
595
587 return data
596 return data
588
597
589 def adddatafilter(self, name, filter):
598 def adddatafilter(self, name, filter):
590 self._datafilters[name] = filter
599 self._datafilters[name] = filter
591
600
592 def wread(self, filename):
601 def wread(self, filename):
593 if self._link(filename):
602 if self._link(filename):
594 data = os.readlink(self.wjoin(filename))
603 data = os.readlink(self.wjoin(filename))
595 else:
604 else:
596 data = self.wopener(filename, 'r').read()
605 data = self.wopener(filename, 'r').read()
597 return self._filter("encode", filename, data)
606 return self._filter("encode", filename, data)
598
607
599 def wwrite(self, filename, data, flags):
608 def wwrite(self, filename, data, flags):
600 data = self._filter("decode", filename, data)
609 data = self._filter("decode", filename, data)
601 try:
610 try:
602 os.unlink(self.wjoin(filename))
611 os.unlink(self.wjoin(filename))
603 except OSError:
612 except OSError:
604 pass
613 pass
605 if 'l' in flags:
614 if 'l' in flags:
606 self.wopener.symlink(data, filename)
615 self.wopener.symlink(data, filename)
607 else:
616 else:
608 self.wopener(filename, 'w').write(data)
617 self.wopener(filename, 'w').write(data)
609 if 'x' in flags:
618 if 'x' in flags:
610 util.set_flags(self.wjoin(filename), False, True)
619 util.set_flags(self.wjoin(filename), False, True)
611
620
612 def wwritedata(self, filename, data):
621 def wwritedata(self, filename, data):
613 return self._filter("decode", filename, data)
622 return self._filter("decode", filename, data)
614
623
615 def transaction(self, desc):
624 def transaction(self, desc):
616 tr = self._transref and self._transref() or None
625 tr = self._transref and self._transref() or None
617 if tr and tr.running():
626 if tr and tr.running():
618 return tr.nest()
627 return tr.nest()
619
628
620 # abort here if the journal already exists
629 # abort here if the journal already exists
621 if os.path.exists(self.sjoin("journal")):
630 if os.path.exists(self.sjoin("journal")):
622 raise error.RepoError(
631 raise error.RepoError(
623 _("abandoned transaction found - run hg recover"))
632 _("abandoned transaction found - run hg recover"))
624
633
625 # save dirstate for rollback
634 # save dirstate for rollback
626 try:
635 try:
627 ds = self.opener("dirstate").read()
636 ds = self.opener("dirstate").read()
628 except IOError:
637 except IOError:
629 ds = ""
638 ds = ""
630 self.opener("journal.dirstate", "w").write(ds)
639 self.opener("journal.dirstate", "w").write(ds)
631 self.opener("journal.branch", "w").write(self.dirstate.branch())
640 self.opener("journal.branch", "w").write(self.dirstate.branch())
632 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
641 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
633
642
634 renames = [(self.sjoin("journal"), self.sjoin("undo")),
643 renames = [(self.sjoin("journal"), self.sjoin("undo")),
635 (self.join("journal.dirstate"), self.join("undo.dirstate")),
644 (self.join("journal.dirstate"), self.join("undo.dirstate")),
636 (self.join("journal.branch"), self.join("undo.branch")),
645 (self.join("journal.branch"), self.join("undo.branch")),
637 (self.join("journal.desc"), self.join("undo.desc"))]
646 (self.join("journal.desc"), self.join("undo.desc"))]
638 tr = transaction.transaction(self.ui.warn, self.sopener,
647 tr = transaction.transaction(self.ui.warn, self.sopener,
639 self.sjoin("journal"),
648 self.sjoin("journal"),
640 aftertrans(renames),
649 aftertrans(renames),
641 self.store.createmode)
650 self.store.createmode)
642 self._transref = weakref.ref(tr)
651 self._transref = weakref.ref(tr)
643 return tr
652 return tr
644
653
645 def recover(self):
654 def recover(self):
646 lock = self.lock()
655 lock = self.lock()
647 try:
656 try:
648 if os.path.exists(self.sjoin("journal")):
657 if os.path.exists(self.sjoin("journal")):
649 self.ui.status(_("rolling back interrupted transaction\n"))
658 self.ui.status(_("rolling back interrupted transaction\n"))
650 transaction.rollback(self.sopener, self.sjoin("journal"),
659 transaction.rollback(self.sopener, self.sjoin("journal"),
651 self.ui.warn)
660 self.ui.warn)
652 self.invalidate()
661 self.invalidate()
653 return True
662 return True
654 else:
663 else:
655 self.ui.warn(_("no interrupted transaction available\n"))
664 self.ui.warn(_("no interrupted transaction available\n"))
656 return False
665 return False
657 finally:
666 finally:
658 lock.release()
667 lock.release()
659
668
660 def rollback(self, dryrun=False):
669 def rollback(self, dryrun=False):
661 wlock = lock = None
670 wlock = lock = None
662 try:
671 try:
663 wlock = self.wlock()
672 wlock = self.wlock()
664 lock = self.lock()
673 lock = self.lock()
665 if os.path.exists(self.sjoin("undo")):
674 if os.path.exists(self.sjoin("undo")):
666 try:
675 try:
667 args = self.opener("undo.desc", "r").read().splitlines()
676 args = self.opener("undo.desc", "r").read().splitlines()
668 if len(args) >= 3 and self.ui.verbose:
677 if len(args) >= 3 and self.ui.verbose:
669 desc = _("rolling back to revision %s"
678 desc = _("rolling back to revision %s"
670 " (undo %s: %s)\n") % (
679 " (undo %s: %s)\n") % (
671 int(args[0]) - 1, args[1], args[2])
680 int(args[0]) - 1, args[1], args[2])
672 elif len(args) >= 2:
681 elif len(args) >= 2:
673 desc = _("rolling back to revision %s (undo %s)\n") % (
682 desc = _("rolling back to revision %s (undo %s)\n") % (
674 int(args[0]) - 1, args[1])
683 int(args[0]) - 1, args[1])
675 except IOError:
684 except IOError:
676 desc = _("rolling back unknown transaction\n")
685 desc = _("rolling back unknown transaction\n")
677 self.ui.status(desc)
686 self.ui.status(desc)
678 if dryrun:
687 if dryrun:
679 return
688 return
680 transaction.rollback(self.sopener, self.sjoin("undo"),
689 transaction.rollback(self.sopener, self.sjoin("undo"),
681 self.ui.warn)
690 self.ui.warn)
682 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
691 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
683 try:
692 try:
684 branch = self.opener("undo.branch").read()
693 branch = self.opener("undo.branch").read()
685 self.dirstate.setbranch(branch)
694 self.dirstate.setbranch(branch)
686 except IOError:
695 except IOError:
687 self.ui.warn(_("Named branch could not be reset, "
696 self.ui.warn(_("Named branch could not be reset, "
688 "current branch still is: %s\n")
697 "current branch still is: %s\n")
689 % encoding.tolocal(self.dirstate.branch()))
698 % encoding.tolocal(self.dirstate.branch()))
690 self.invalidate()
699 self.invalidate()
691 self.dirstate.invalidate()
700 self.dirstate.invalidate()
692 self.destroyed()
701 self.destroyed()
693 else:
702 else:
694 self.ui.warn(_("no rollback information available\n"))
703 self.ui.warn(_("no rollback information available\n"))
695 return 1
704 return 1
696 finally:
705 finally:
697 release(lock, wlock)
706 release(lock, wlock)
698
707
699 def invalidatecaches(self):
708 def invalidatecaches(self):
700 self._tags = None
709 self._tags = None
701 self._tagtypes = None
710 self._tagtypes = None
702 self.nodetagscache = None
711 self.nodetagscache = None
703 self._branchcache = None # in UTF-8
712 self._branchcache = None # in UTF-8
704 self._branchcachetip = None
713 self._branchcachetip = None
705
714
706 def invalidate(self):
715 def invalidate(self):
707 for a in "changelog manifest".split():
716 for a in "changelog manifest".split():
708 if a in self.__dict__:
717 if a in self.__dict__:
709 delattr(self, a)
718 delattr(self, a)
710 self.invalidatecaches()
719 self.invalidatecaches()
711
720
712 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
721 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
713 try:
722 try:
714 l = lock.lock(lockname, 0, releasefn, desc=desc)
723 l = lock.lock(lockname, 0, releasefn, desc=desc)
715 except error.LockHeld, inst:
724 except error.LockHeld, inst:
716 if not wait:
725 if not wait:
717 raise
726 raise
718 self.ui.warn(_("waiting for lock on %s held by %r\n") %
727 self.ui.warn(_("waiting for lock on %s held by %r\n") %
719 (desc, inst.locker))
728 (desc, inst.locker))
720 # default to 600 seconds timeout
729 # default to 600 seconds timeout
721 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
730 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
722 releasefn, desc=desc)
731 releasefn, desc=desc)
723 if acquirefn:
732 if acquirefn:
724 acquirefn()
733 acquirefn()
725 return l
734 return l
726
735
727 def lock(self, wait=True):
736 def lock(self, wait=True):
728 '''Lock the repository store (.hg/store) and return a weak reference
737 '''Lock the repository store (.hg/store) and return a weak reference
729 to the lock. Use this before modifying the store (e.g. committing or
738 to the lock. Use this before modifying the store (e.g. committing or
730 stripping). If you are opening a transaction, get a lock as well.)'''
739 stripping). If you are opening a transaction, get a lock as well.)'''
731 l = self._lockref and self._lockref()
740 l = self._lockref and self._lockref()
732 if l is not None and l.held:
741 if l is not None and l.held:
733 l.lock()
742 l.lock()
734 return l
743 return l
735
744
736 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
745 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
737 _('repository %s') % self.origroot)
746 _('repository %s') % self.origroot)
738 self._lockref = weakref.ref(l)
747 self._lockref = weakref.ref(l)
739 return l
748 return l
740
749
741 def wlock(self, wait=True):
750 def wlock(self, wait=True):
742 '''Lock the non-store parts of the repository (everything under
751 '''Lock the non-store parts of the repository (everything under
743 .hg except .hg/store) and return a weak reference to the lock.
752 .hg except .hg/store) and return a weak reference to the lock.
744 Use this before modifying files in .hg.'''
753 Use this before modifying files in .hg.'''
745 l = self._wlockref and self._wlockref()
754 l = self._wlockref and self._wlockref()
746 if l is not None and l.held:
755 if l is not None and l.held:
747 l.lock()
756 l.lock()
748 return l
757 return l
749
758
750 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
759 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
751 self.dirstate.invalidate, _('working directory of %s') %
760 self.dirstate.invalidate, _('working directory of %s') %
752 self.origroot)
761 self.origroot)
753 self._wlockref = weakref.ref(l)
762 self._wlockref = weakref.ref(l)
754 return l
763 return l
755
764
756 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
765 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
757 """
766 """
758 commit an individual file as part of a larger transaction
767 commit an individual file as part of a larger transaction
759 """
768 """
760
769
761 fname = fctx.path()
770 fname = fctx.path()
762 text = fctx.data()
771 text = fctx.data()
763 flog = self.file(fname)
772 flog = self.file(fname)
764 fparent1 = manifest1.get(fname, nullid)
773 fparent1 = manifest1.get(fname, nullid)
765 fparent2 = fparent2o = manifest2.get(fname, nullid)
774 fparent2 = fparent2o = manifest2.get(fname, nullid)
766
775
767 meta = {}
776 meta = {}
768 copy = fctx.renamed()
777 copy = fctx.renamed()
769 if copy and copy[0] != fname:
778 if copy and copy[0] != fname:
770 # Mark the new revision of this file as a copy of another
779 # Mark the new revision of this file as a copy of another
771 # file. This copy data will effectively act as a parent
780 # file. This copy data will effectively act as a parent
772 # of this new revision. If this is a merge, the first
781 # of this new revision. If this is a merge, the first
773 # parent will be the nullid (meaning "look up the copy data")
782 # parent will be the nullid (meaning "look up the copy data")
774 # and the second one will be the other parent. For example:
783 # and the second one will be the other parent. For example:
775 #
784 #
776 # 0 --- 1 --- 3 rev1 changes file foo
785 # 0 --- 1 --- 3 rev1 changes file foo
777 # \ / rev2 renames foo to bar and changes it
786 # \ / rev2 renames foo to bar and changes it
778 # \- 2 -/ rev3 should have bar with all changes and
787 # \- 2 -/ rev3 should have bar with all changes and
779 # should record that bar descends from
788 # should record that bar descends from
780 # bar in rev2 and foo in rev1
789 # bar in rev2 and foo in rev1
781 #
790 #
782 # this allows this merge to succeed:
791 # this allows this merge to succeed:
783 #
792 #
784 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
793 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
785 # \ / merging rev3 and rev4 should use bar@rev2
794 # \ / merging rev3 and rev4 should use bar@rev2
786 # \- 2 --- 4 as the merge base
795 # \- 2 --- 4 as the merge base
787 #
796 #
788
797
789 cfname = copy[0]
798 cfname = copy[0]
790 crev = manifest1.get(cfname)
799 crev = manifest1.get(cfname)
791 newfparent = fparent2
800 newfparent = fparent2
792
801
793 if manifest2: # branch merge
802 if manifest2: # branch merge
794 if fparent2 == nullid or crev is None: # copied on remote side
803 if fparent2 == nullid or crev is None: # copied on remote side
795 if cfname in manifest2:
804 if cfname in manifest2:
796 crev = manifest2[cfname]
805 crev = manifest2[cfname]
797 newfparent = fparent1
806 newfparent = fparent1
798
807
799 # find source in nearest ancestor if we've lost track
808 # find source in nearest ancestor if we've lost track
800 if not crev:
809 if not crev:
801 self.ui.debug(" %s: searching for copy revision for %s\n" %
810 self.ui.debug(" %s: searching for copy revision for %s\n" %
802 (fname, cfname))
811 (fname, cfname))
803 for ancestor in self['.'].ancestors():
812 for ancestor in self['.'].ancestors():
804 if cfname in ancestor:
813 if cfname in ancestor:
805 crev = ancestor[cfname].filenode()
814 crev = ancestor[cfname].filenode()
806 break
815 break
807
816
808 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
817 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
809 meta["copy"] = cfname
818 meta["copy"] = cfname
810 meta["copyrev"] = hex(crev)
819 meta["copyrev"] = hex(crev)
811 fparent1, fparent2 = nullid, newfparent
820 fparent1, fparent2 = nullid, newfparent
812 elif fparent2 != nullid:
821 elif fparent2 != nullid:
813 # is one parent an ancestor of the other?
822 # is one parent an ancestor of the other?
814 fparentancestor = flog.ancestor(fparent1, fparent2)
823 fparentancestor = flog.ancestor(fparent1, fparent2)
815 if fparentancestor == fparent1:
824 if fparentancestor == fparent1:
816 fparent1, fparent2 = fparent2, nullid
825 fparent1, fparent2 = fparent2, nullid
817 elif fparentancestor == fparent2:
826 elif fparentancestor == fparent2:
818 fparent2 = nullid
827 fparent2 = nullid
819
828
820 # is the file changed?
829 # is the file changed?
821 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
830 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
822 changelist.append(fname)
831 changelist.append(fname)
823 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
832 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
824
833
825 # are just the flags changed during merge?
834 # are just the flags changed during merge?
826 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
835 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
827 changelist.append(fname)
836 changelist.append(fname)
828
837
829 return fparent1
838 return fparent1
830
839
831 def commit(self, text="", user=None, date=None, match=None, force=False,
840 def commit(self, text="", user=None, date=None, match=None, force=False,
832 editor=False, extra={}):
841 editor=False, extra={}):
833 """Add a new revision to current repository.
842 """Add a new revision to current repository.
834
843
835 Revision information is gathered from the working directory,
844 Revision information is gathered from the working directory,
836 match can be used to filter the committed files. If editor is
845 match can be used to filter the committed files. If editor is
837 supplied, it is called to get a commit message.
846 supplied, it is called to get a commit message.
838 """
847 """
839
848
840 def fail(f, msg):
849 def fail(f, msg):
841 raise util.Abort('%s: %s' % (f, msg))
850 raise util.Abort('%s: %s' % (f, msg))
842
851
843 if not match:
852 if not match:
844 match = matchmod.always(self.root, '')
853 match = matchmod.always(self.root, '')
845
854
846 if not force:
855 if not force:
847 vdirs = []
856 vdirs = []
848 match.dir = vdirs.append
857 match.dir = vdirs.append
849 match.bad = fail
858 match.bad = fail
850
859
851 wlock = self.wlock()
860 wlock = self.wlock()
852 try:
861 try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

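    # A minimal sketch of driving commitctx() programmatically, assuming the
    # memctx/memfilectx helpers in context.py keep their usual signatures
    # ('repo' is a localrepository instance; illustrative only):
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(path, 'contents of %s\n' % path)
    #     ctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                          'example message', ['a.txt'], getfilectx)
    #     node = repo.commitctx(ctx)
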
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

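    # Example usage (a sketch; 'repo' is a localrepository instance and the
    # pattern is illustrative):
    #
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):              # walk the working directory
    #         repo.ui.write(f + '\n')
    #     for f in repo.walk(m, node='tip'):  # walk a given changeset
    #         repo.ui.write(f + '\n')
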
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

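    # Example of consuming the status tuple (a sketch; 'repo' is a
    # localrepository instance). The seven lists come back in the order
    # built above:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, clean=True)
    #     for f in modified:
    #         repo.ui.write('M %s\n' % f)
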
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

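    # Example (a sketch; 'repo' is a localrepository instance): print the
    # open heads of the 'default' branch, newest first:
    #
    #     for h in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(h))
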
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

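    # The sampling above is exponential: walking first parents from 'top'
    # towards 'bottom', a node is recorded whenever i == f, with f doubling
    # after each hit, i.e. at distances 1, 2, 4, 8, ... from 'top'. That
    # keeps the reply to the old discovery protocol logarithmic in the
    # length of the branch. A rough standalone model of the spacing
    # (illustrative only):
    #
    #     def sample_positions(length):
    #         l, f = [], 1
    #         for i in xrange(1, length + 1):
    #             if i == f:
    #                 l.append(i)
    #                 f *= 2
    #         return l  # e.g. length=10 -> [1, 2, 4, 8]
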
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

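    # Example (a sketch): pulling everything from a peer opened with the hg
    # module (not imported here; the URL is illustrative):
    #
    #     other = hg.repository(repo.ui, 'http://example.com/repo')
    #     repo.pull(other)
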
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

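    # Interpreting push()'s return value (a sketch; 'other' is a peer
    # repository object):
    #
    #     r = repo.push(other)
    #     if r == 0:
    #         repo.ui.write('HTTP error or nothing to push\n')
    #     elif r == 1:
    #         repo.ui.write('pushed; head count unchanged, or push refused\n')
    #     else:
    #         # other values follow addchangegroup(): r - 1 heads added
    #         # when r > 1, -r - 1 heads removed when r < 0
    #         pass
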
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

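    # Shape of the extranodes argument (a sketch with made-up values,
    # matching the docstring above):
    #
    #     extranodes = {
    #         1: [(manifestnode, linknode)],          # 1 keys the manifest
    #         'foo/bar.txt': [(filenode, linknode)],  # filenames key filelogs
    #     }
    #     cg = repo.changegroupsubset(bases, heads, 'pull', extranodes)
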
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

1617 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1626 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1618 """Add the changegroup returned by source.read() to this repo.
1627 """Add the changegroup returned by source.read() to this repo.
1619 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1628 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1620 the URL of the repo where this changegroup is coming from.
1629 the URL of the repo where this changegroup is coming from.
1621
1630
1622 Return an integer summarizing the change to this repo:
1631 Return an integer summarizing the change to this repo:
1623 - nothing changed or no source: 0
1632 - nothing changed or no source: 0
1624 - more heads than before: 1+added heads (2..n)
1633 - more heads than before: 1+added heads (2..n)
1625 - fewer heads than before: -1-removed heads (-2..-n)
1634 - fewer heads than before: -1-removed heads (-2..-n)
1626 - number of heads stays the same: 1
1635 - number of heads stays the same: 1
1627 """
1636 """
1628 def csmap(x):
1637 def csmap(x):
1629 self.ui.debug("add changeset %s\n" % short(x))
1638 self.ui.debug("add changeset %s\n" % short(x))
1630 return len(cl)
1639 return len(cl)
1631
1640
1632 def revmap(x):
1641 def revmap(x):
1633 return cl.rev(x)
1642 return cl.rev(x)
1634
1643
1635 if not source:
1644 if not source:
1636 return 0
1645 return 0
1637
1646
1638 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1647 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1639
1648
1640 changesets = files = revisions = 0
1649 changesets = files = revisions = 0
1641 efiles = set()
1650 efiles = set()
1642
1651
1643 # write changelog data to temp files so concurrent readers will not see
1652 # write changelog data to temp files so concurrent readers will not see
1644 # inconsistent view
1653 # inconsistent view
1645 cl = self.changelog
1654 cl = self.changelog
1646 cl.delayupdate()
1655 cl.delayupdate()
1647 oldheads = len(cl.heads())
1656 oldheads = len(cl.heads())
1648
1657
1649 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1658 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1650 try:
1659 try:
1651 trp = weakref.proxy(tr)
1660 trp = weakref.proxy(tr)
1652 # pull off the changeset group
1661 # pull off the changeset group
1653 self.ui.status(_("adding changesets\n"))
1662 self.ui.status(_("adding changesets\n"))
1654 clstart = len(cl)
1663 clstart = len(cl)
1655 class prog(object):
1664 class prog(object):
1656 step = _('changesets')
1665 step = _('changesets')
1657 count = 1
1666 count = 1
1658 ui = self.ui
1667 ui = self.ui
1659 total = None
1668 total = None
1660 def __call__(self):
1669 def __call__(self):
1661 self.ui.progress(self.step, self.count, unit=_('chunks'),
1670 self.ui.progress(self.step, self.count, unit=_('chunks'),
1662 total=self.total)
1671 total=self.total)
1663 self.count += 1
1672 self.count += 1
1664 pr = prog()
1673 pr = prog()
1665 chunkiter = changegroup.chunkiter(source, progress=pr)
1674 chunkiter = changegroup.chunkiter(source, progress=pr)
1666 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1675 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1667 raise util.Abort(_("received changelog group is empty"))
1676 raise util.Abort(_("received changelog group is empty"))
1668 clend = len(cl)
1677 clend = len(cl)
1669 changesets = clend - clstart
1678 changesets = clend - clstart
1670 for c in xrange(clstart, clend):
1679 for c in xrange(clstart, clend):
1671 efiles.update(self[c].files())
1680 efiles.update(self[c].files())
1672 efiles = len(efiles)
1681 efiles = len(efiles)
1673 self.ui.progress(_('changesets'), None)
1682 self.ui.progress(_('changesets'), None)
1674
1683
1675 # pull off the manifest group
1684 # pull off the manifest group
1676 self.ui.status(_("adding manifests\n"))
1685 self.ui.status(_("adding manifests\n"))
1677 pr.step = _('manifests')
1686 pr.step = _('manifests')
1678 pr.count = 1
1687 pr.count = 1
1679 pr.total = changesets # manifests <= changesets
1688 pr.total = changesets # manifests <= changesets
1680 chunkiter = changegroup.chunkiter(source, progress=pr)
1689 chunkiter = changegroup.chunkiter(source, progress=pr)
1681 # no need to check for empty manifest group here:
1690 # no need to check for empty manifest group here:
1682 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1691 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1683 # no new manifest will be created and the manifest group will
1692 # no new manifest will be created and the manifest group will
1684 # be empty during the pull
1693 # be empty during the pull
1685 self.manifest.addgroup(chunkiter, revmap, trp)
1694 self.manifest.addgroup(chunkiter, revmap, trp)
1686 self.ui.progress(_('manifests'), None)
1695 self.ui.progress(_('manifests'), None)
1687
1696
1688 needfiles = {}
1697 needfiles = {}
1689 if self.ui.configbool('server', 'validate', default=False):
1698 if self.ui.configbool('server', 'validate', default=False):
1690 # validate incoming csets have their manifests
1699 # validate incoming csets have their manifests
1691 for cset in xrange(clstart, clend):
             for cset in xrange(clstart, clend):
                 mfest = self.changelog.read(self.changelog.node(cset))[0]
                 mfest = self.manifest.readdelta(mfest)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)

             # process the files
             self.ui.status(_("adding file changes\n"))
             pr.step = 'files'
             pr.count = 1
             pr.total = efiles
             while 1:
                 f = changegroup.getchunk(source)
                 if not f:
                     break
                 self.ui.debug("adding %s revisions\n" % f)
                 pr()
                 fl = self.file(f)
                 o = len(fl)
                 chunkiter = changegroup.chunkiter(source)
                 if fl.addgroup(chunkiter, revmap, trp) is None:
                     raise util.Abort(_("received file revlog group is empty"))
                 revisions += len(fl) - o
                 files += 1
                 if f in needfiles:
                     needs = needfiles[f]
                     for new in xrange(o, len(fl)):
                         n = fl.node(new)
                         if n in needs:
                             needs.remove(n)
                     if not needs:
                         del needfiles[f]
             self.ui.progress(_('files'), None)

             for f, needs in needfiles.iteritems():
                 fl = self.file(f)
                 for n in needs:
                     try:
                         fl.rev(n)
                     except error.LookupError:
                         raise util.Abort(
                             _('missing file data for %s:%s - run hg verify') %
                             (f, hex(n)))

             newheads = len(cl.heads())
             heads = ""
             if oldheads and newheads != oldheads:
                 heads = _(" (%+d heads)") % (newheads - oldheads)

             self.ui.status(_("added %d changesets"
                              " with %d changes to %d files%s\n")
                            % (changesets, revisions, files, heads))

             if changesets > 0:
                 p = lambda: cl.writepending() and self.root or ""
                 self.hook('pretxnchangegroup', throw=True,
                           node=hex(cl.node(clstart)), source=srctype,
                           url=url, pending=p)

             # make changelog see real files again
             cl.finalize(trp)

             tr.close()
         finally:
             tr.release()
             if lock:
                 lock.release()

         if changesets > 0:
             # forcefully update the on-disk branch cache
             self.ui.debug("updating the branch cache\n")
             self.updatebranchcache()
             self.hook("changegroup", node=hex(cl.node(clstart)),
                       source=srctype, url=url)

             for i in xrange(clstart, clend):
                 self.hook("incoming", node=hex(cl.node(i)),
                           source=srctype, url=url)

         # never return 0 here:
         if newheads < oldheads:
             return newheads - oldheads - 1
         else:
             return newheads - oldheads + 1

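Worth spelling out the return convention above: zero is reserved so callers can treat any integer as a real result, and the ±1 bias lets them recover the head delta from the sign and magnitude. A minimal sketch of decoding it (decode_addchangegroup_result is a hypothetical helper, not part of Mercurial):

    def decode_addchangegroup_result(ret):
        # ret = newheads - oldheads + 1 when heads grew or stayed equal,
        # ret = newheads - oldheads - 1 when heads shrank; never 0.
        assert ret != 0
        if ret > 0:
            return ret - 1   # number of heads added (0 if unchanged)
        return ret + 1       # negative: number of heads removed

    # e.g. 2 -> 3 heads gives ret = 2, i.e. one head added:
    assert decode_addchangegroup_result(3 - 2 + 1) == 1
    # e.g. 3 -> 1 heads (a merge was pushed) gives ret = -3, i.e. two removed:
    assert decode_addchangegroup_result(1 - 3 - 1) == -2
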
-    def stream_in(self, remote):
+    def stream_in(self, remote, requirements):
         fp = remote.stream_out()
         l = fp.readline()
         try:
             resp = int(l)
         except ValueError:
             raise error.ResponseError(
                 _('Unexpected response from remote server:'), l)
         if resp == 1:
             raise util.Abort(_('operation forbidden by server'))
         elif resp == 2:
             raise util.Abort(_('locking the remote repository failed'))
         elif resp != 0:
             raise util.Abort(_('the server sent an unknown error code'))
         self.ui.status(_('streaming all changes\n'))
         l = fp.readline()
         try:
             total_files, total_bytes = map(int, l.split(' ', 1))
         except (ValueError, TypeError):
             raise error.ResponseError(
                 _('Unexpected response from remote server:'), l)
         self.ui.status(_('%d files to transfer, %s of data\n') %
                        (total_files, util.bytecount(total_bytes)))
         start = time.time()
         for i in xrange(total_files):
             # XXX doesn't support '\n' or '\r' in filenames
             l = fp.readline()
             try:
                 name, size = l.split('\0', 1)
                 size = int(size)
             except (ValueError, TypeError):
                 raise error.ResponseError(
                     _('Unexpected response from remote server:'), l)
             self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
             # for backwards compat, name was partially encoded
             ofp = self.sopener(store.decodedir(name), 'w')
             for chunk in util.filechunkiter(fp, limit=size):
                 ofp.write(chunk)
             ofp.close()
         elapsed = time.time() - start
         if elapsed <= 0:
             elapsed = 0.001
         self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                        (util.bytecount(total_bytes), elapsed,
                         util.bytecount(total_bytes / elapsed)))
+
+        # new requirements = old non-format requirements + new format-related
+        # requirements from the streamed-in repository
+        requirements.update(set(self.requirements) - self.supportedformats)
+        self._applyrequirements(requirements)
+        self._writerequirements()
+
         self.invalidate()
         return len(self.heads()) + 1

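The requirements bookkeeping added at the end is the subtle part of this change: format requirements come from the streamed-in store, while non-format requirements are kept from the local repository. The same set arithmetic in isolation, with assumed example values:

    supportedformats = set(['revlogv1', 'parentdelta'])

    # what the local repo required before the stream (example values)
    local = set(['revlogv1', 'store', 'fncache'])
    # format requirements announced by the remote via 'streamreqs'
    streamed = set(['revlogv1', 'parentdelta'])

    requirements = set(streamed)
    requirements.update(local - supportedformats)   # keep non-format reqs
    assert requirements == set(['revlogv1', 'parentdelta', 'store', 'fncache'])
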
     def clone(self, remote, heads=[], stream=False):
         '''clone remote repository.

         keyword arguments:
         heads: list of revs to clone (forces use of pull)
         stream: use streaming clone if possible'''

         # now, all clients that can request uncompressed clones can
         # read repo formats supported by all servers that can serve
         # them.

         # if revlog format changes, client will have to check version
         # and format flags on "stream" capability, and use
         # uncompressed only if compatible.

-        if stream and not heads and remote.capable('stream'):
-            return self.stream_in(remote)
+        if stream and not heads:
+            # 'stream' means remote revlog format is revlogv1 only
+            if remote.capable('stream'):
+                return self.stream_in(remote, set(('revlogv1',)))
+            # otherwise, 'streamreqs' contains the remote revlog format
+            streamreqs = remote.capable('streamreqs')
+            if streamreqs:
+                streamreqs = set(streamreqs.split(','))
+                # if we support it, stream in and adjust our requirements
+                if not streamreqs - self.supportedformats:
+                    return self.stream_in(remote, streamreqs)
         return self.pull(remote, heads)

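The negotiation above reduces to: a bare 'stream' capability implies a plain revlogv1 store, while 'streamreqs' names every format the client must understand before it may stream. A self-contained sketch of that decision (caps stands in for a parsed capability dict; it is not a Mercurial API):

    supportedformats = set(['revlogv1', 'parentdelta'])

    def can_stream(caps):
        # caps: dict mapping capability name to its value ('' if none)
        if 'stream' in caps:
            return set(['revlogv1'])            # legacy cap: revlogv1 only
        if 'streamreqs' in caps:
            reqs = set(caps['streamreqs'].split(','))
            if not reqs - supportedformats:     # we understand every format
                return reqs
        return None                             # fall back to pull

    assert can_stream({'stream': ''}) == set(['revlogv1'])
    assert can_stream({'streamreqs': 'revlogv1,parentdelta'}) == \
           set(['revlogv1', 'parentdelta'])
    assert can_stream({'streamreqs': 'revlogv2'}) is None
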
     def pushkey(self, namespace, key, old, new):
         return pushkey.push(self, namespace, key, old, new)

     def listkeys(self, namespace):
         return pushkey.list(self, namespace)

 # used to avoid circular references so destructors work
 def aftertrans(files):
     renamefiles = [tuple(t) for t in files]
     def a():
         for src, dest in renamefiles:
             util.rename(src, dest)
     return a

 def instance(ui, path, create):
     return localrepository(ui, util.drop_scheme('file', path), create)

 def islocal(path):
     return True
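aftertrans() is shaped deliberately: it copies the pairs into plain tuples and the inner function closes over nothing else, so the returned callback holds no reference back to the transaction that created it. The pattern in isolation (using os.rename and temp files only to make the sketch runnable):

    import os, tempfile

    def aftertrans(files):
        renamefiles = [tuple(t) for t in files]
        def a():
            for src, dest in renamefiles:
                os.rename(src, dest)   # util.rename in Mercurial proper
        return a

    d = tempfile.mkdtemp()
    src, dest = os.path.join(d, 'journal'), os.path.join(d, 'undo')
    open(src, 'w').close()
    cb = aftertrans([(src, dest)])   # cb holds only plain tuples
    cb()
    assert os.path.exists(dest)
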
@@ -1,326 +1,332 b''
 # wireproto.py - generic wire protocol support functions
 #
 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import urllib, tempfile, os, sys
 from i18n import _
 from node import bin, hex
 import changegroup as changegroupmod
 import repo, error, encoding, util, store
 import pushkey as pushkeymod

 # list of nodes encoding / decoding

 def decodelist(l, sep=' '):
     return map(bin, l.split(sep))

 def encodelist(l, sep=' '):
     return sep.join(map(hex, l))

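These two helpers are the baseline wire encoding for node lists: 40-character hex digests joined by a separator. A quick round-trip, assuming bin/hex are the usual binascii wrappers:

    from binascii import hexlify, unhexlify

    def decodelist(l, sep=' '):
        return [unhexlify(s) for s in l.split(sep)]

    def encodelist(l, sep=' '):
        return sep.join(hexlify(n) for n in l)

    nodes = ['\x00' * 20, '\xff' * 20]          # two 20-byte node ids
    wire = encodelist(nodes)
    assert wire == '0' * 40 + ' ' + 'f' * 40
    assert decodelist(wire) == nodes
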
 # client side

 class wirerepository(repo.repository):
     def lookup(self, key):
         self.requirecap('lookup', _('look up remote revision'))
         d = self._call("lookup", key=key)
         success, data = d[:-1].split(" ", 1)
         if int(success):
             return bin(data)
         self._abort(error.RepoError(data))

     def heads(self):
         d = self._call("heads")
         try:
             return decodelist(d[:-1])
         except:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     def branchmap(self):
         d = self._call("branchmap")
         try:
             branchmap = {}
             for branchpart in d.splitlines():
                 branchname, branchheads = branchpart.split(' ', 1)
                 branchname = urllib.unquote(branchname)
                 # Earlier servers (1.3.x) send branch names in (their) local
                 # charset. The best we can do is assume it's identical to our
                 # own local charset, in case it's not utf-8.
                 try:
                     branchname.decode('utf-8')
                 except UnicodeDecodeError:
                     branchname = encoding.fromlocal(branchname)
                 branchheads = decodelist(branchheads)
                 branchmap[branchname] = branchheads
             return branchmap
         except TypeError:
             self._abort(error.ResponseError(_("unexpected response:"), d))

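The branchmap payload it parses is line-oriented: a URL-quoted branch name, a space, then space-separated hex heads. Decoding a sample payload by hand (node values are made up):

    import urllib
    from binascii import unhexlify

    payload = ('default %s\n' % ('aa' * 20) +
               'stable%%20branch %s %s' % ('bb' * 20, 'cc' * 20))

    branchmap = {}
    for line in payload.splitlines():
        name, heads = line.split(' ', 1)
        name = urllib.unquote(name)          # server urllib.quote()s names
        branchmap[name] = [unhexlify(h) for h in heads.split(' ')]

    assert branchmap['default'] == [unhexlify('aa' * 20)]
    assert len(branchmap['stable branch']) == 2
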
     def branches(self, nodes):
         n = encodelist(nodes)
         d = self._call("branches", nodes=n)
         try:
             br = [tuple(decodelist(b)) for b in d.splitlines()]
             return br
         except:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     def between(self, pairs):
         batch = 8 # avoid giant requests
         r = []
         for i in xrange(0, len(pairs), batch):
             n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
             d = self._call("between", pairs=n)
             try:
                 r.extend(l and decodelist(l) or [] for l in d.splitlines())
             except:
                 self._abort(error.ResponseError(_("unexpected response:"), d))
         return r

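between() caps each request at eight pairs so a deep ancestry walk never becomes one oversized call. The batching idiom on its own:

    def batches(items, size=8):
        for i in xrange(0, len(items), size):
            yield items[i:i + size]

    pairs = [('a', 'b')] * 20
    assert [len(b) for b in batches(pairs)] == [8, 8, 4]
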
     def pushkey(self, namespace, key, old, new):
         if not self.capable('pushkey'):
             return False
         d = self._call("pushkey",
                        namespace=namespace, key=key, old=old, new=new)
         return bool(int(d))

     def listkeys(self, namespace):
         if not self.capable('pushkey'):
             return {}
         d = self._call("listkeys", namespace=namespace)
         r = {}
         for l in d.splitlines():
             k, v = l.split('\t')
             r[k.decode('string-escape')] = v.decode('string-escape')
         return r

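The listkeys payload is one tab-separated key/value pair per line, with both sides string-escaped so embedded tabs and newlines survive the trip. Decoding a sample response (contents invented for illustration):

    d = 'bookmark\t' + 'aa' * 20 + '\nodd\\tname\tvalue\\nwith newline'

    r = {}
    for l in d.splitlines():
        k, v = l.split('\t')
        r[k.decode('string-escape')] = v.decode('string-escape')

    assert r['odd\tname'] == 'value\nwith newline'
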
     def stream_out(self):
         return self._callstream('stream_out')

     def changegroup(self, nodes, kind):
         n = encodelist(nodes)
         f = self._callstream("changegroup", roots=n)
         return self._decompress(f)

     def changegroupsubset(self, bases, heads, kind):
         self.requirecap('changegroupsubset', _('look up remote changes'))
         bases = encodelist(bases)
         heads = encodelist(heads)
         return self._decompress(self._callstream("changegroupsubset",
                                                  bases=bases, heads=heads))

     def unbundle(self, cg, heads, source):
         '''Send cg (a readable file-like object representing the
         changegroup to push, typically a chunkbuffer object) to the
         remote server as a bundle. Return an integer indicating the
         result of the push (see localrepository.addchangegroup()).'''

         ret, output = self._callpush("unbundle", cg, heads=encodelist(heads))
         if ret == "":
             raise error.ResponseError(
                 _('push failed:'), output)
         try:
             ret = int(ret)
         except ValueError:
             raise error.ResponseError(
                 _('push failed (unexpected response):'), ret)

         for l in output.splitlines(True):
             self.ui.status(_('remote: '), l)
         return ret

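A small but deliberate detail in unbundle(): splitlines(True) keeps the server's own line endings, so each forwarded line is byte-identical apart from the 'remote: ' prefix. In isolation (the output text is made up):

    output = 'added 2 changesets\nrun hooks\n'
    assert [('remote: ' + l) for l in output.splitlines(True)] == \
           ['remote: added 2 changesets\n', 'remote: run hooks\n']
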
 # server side

 class streamres(object):
     def __init__(self, gen):
         self.gen = gen

 class pushres(object):
     def __init__(self, res):
         self.res = res

 def dispatch(repo, proto, command):
     func, spec = commands[command]
     args = proto.getargs(spec)
     return func(repo, proto, *args)

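dispatch() plus the commands table at the bottom of this file is the entire server-side router: each entry maps a command name to a handler and a space-separated argument spec that getargs() resolves. A toy router in the same shape (fakeproto is a stand-in, not Mercurial's proto object):

    def heads_cmd(repo, proto):
        return 'deadbeef' * 5 + '\n'

    def lookup_cmd(repo, proto, key):
        return '1 %s\n' % ('ab' * 20)

    commands = {
        'heads': (heads_cmd, ''),
        'lookup': (lookup_cmd, 'key'),
    }

    class fakeproto(object):
        def __init__(self, args):
            self.args = args
        def getargs(self, spec):
            return [self.args[k] for k in spec.split()] if spec else []

    def dispatch(repo, proto, command):
        func, spec = commands[command]
        return func(repo, proto, *proto.getargs(spec))

    assert dispatch(None, fakeproto({'key': 'tip'}), 'lookup').startswith('1 ')
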
 def between(repo, proto, pairs):
     pairs = [decodelist(p, '-') for p in pairs.split(" ")]
     r = []
     for b in repo.between(pairs):
         r.append(encodelist(b) + "\n")
     return "".join(r)

 def branchmap(repo, proto):
     branchmap = repo.branchmap()
     heads = []
     for branch, nodes in branchmap.iteritems():
         branchname = urllib.quote(branch)
         branchnodes = encodelist(nodes)
         heads.append('%s %s' % (branchname, branchnodes))
     return '\n'.join(heads)

 def branches(repo, proto, nodes):
     nodes = decodelist(nodes)
     r = []
     for b in repo.branches(nodes):
         r.append(encodelist(b) + "\n")
     return "".join(r)

 def capabilities(repo, proto):
     caps = 'lookup changegroupsubset branchmap pushkey'.split()
     if _allowstream(repo.ui):
-        caps.append('stream=%d' % repo.changelog.version)
+        requiredformats = repo.requirements & repo.supportedformats
+        # if our local revlogs are just revlogv1, add 'stream' cap
+        if not requiredformats - set(('revlogv1',)):
+            caps.append('stream')
+        # otherwise, add 'streamreqs' detailing our local revlog format
+        else:
+            caps.append('streamreqs=%s' % ','.join(requiredformats))
     caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
     return ' '.join(caps)

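Concretely, a plain revlogv1 repository still advertises 'stream' exactly as before, while a parentdelta repository switches to 'streamreqs', which old clients ignore and therefore fall back to pull rather than receive a store they cannot read. A sketch of both outputs (sorted() added here only for deterministic asserts; the real code joins the set unsorted):

    supportedformats = set(['revlogv1', 'parentdelta'])

    def stream_caps(requirements):
        requiredformats = requirements & supportedformats
        if not requiredformats - set(['revlogv1']):
            return 'stream'
        return 'streamreqs=%s' % ','.join(sorted(requiredformats))

    assert stream_caps(set(['revlogv1', 'store'])) == 'stream'
    assert stream_caps(set(['revlogv1', 'parentdelta', 'store'])) == \
           'streamreqs=parentdelta,revlogv1'
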
 def changegroup(repo, proto, roots):
     nodes = decodelist(roots)
     cg = repo.changegroup(nodes, 'serve')
     return streamres(proto.groupchunks(cg))

 def changegroupsubset(repo, proto, bases, heads):
     bases = decodelist(bases)
     heads = decodelist(heads)
     cg = repo.changegroupsubset(bases, heads, 'serve')
     return streamres(proto.groupchunks(cg))

 def heads(repo, proto):
     h = repo.heads()
     return encodelist(h) + "\n"

 def hello(repo, proto):
     '''the hello command returns a set of lines describing various
     interesting things about the server, in an RFC822-like format.
     Currently the only one defined is "capabilities", which
     consists of a line in the form:

     capabilities: space separated list of tokens
     '''
     return "capabilities: %s\n" % (capabilities(repo, proto))

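So a complete hello response is a single capabilities line; for example (the capability set shown is illustrative, and the unbundle value lists bundle types in priority order):

    capabilities: lookup changegroupsubset branchmap pushkey streamreqs=parentdelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN
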
 def listkeys(repo, proto, namespace):
     d = pushkeymod.list(repo, namespace).items()
     t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
                                v.encode('string-escape')) for k, v in d])
     return t

 def lookup(repo, proto, key):
     try:
         r = hex(repo.lookup(key))
         success = 1
     except Exception, inst:
         r = str(inst)
         success = 0
     return "%s %s\n" % (success, r)

 def pushkey(repo, proto, namespace, key, old, new):
     r = pushkeymod.push(repo, namespace, key, old, new)
     return '%s\n' % int(r)

 def _allowstream(ui):
     return ui.configbool('server', 'uncompressed', True, untrusted=True)

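_allowstream() is the one knob behind all of this: streaming defaults to on and is read untrusted, so setting uncompressed = False in the [server] section of an hgrc disables it. A simplified stand-in for the default handling (configbool here is not ui.configbool):

    def configbool(cfg, section, name, default):
        # simplified stand-in for ui.configbool(); cfg is a plain dict
        v = cfg.get('%s.%s' % (section, name))
        if v is None:
            return default
        return v.strip().lower() in ('1', 'true', 'yes', 'on')

    assert configbool({}, 'server', 'uncompressed', True) == True
    assert configbool({'server.uncompressed': 'False'},
                      'server', 'uncompressed', True) == False
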
 def stream(repo, proto):
     '''If the server supports streaming clone, it advertises the "stream"
     capability with a value representing the version and flags of the repo
     it is serving. Client checks to see if it understands the format.

     The format is simple: the server writes out a line with the amount
     of files, then the total amount of bytes to be transfered (separated
     by a space). Then, for each file, the server first writes the filename
     and filesize (separated by the null character), then the file contents.
     '''

     if not _allowstream(repo.ui):
         return '1\n'

     entries = []
     total_bytes = 0
     try:
         # get consistent snapshot of repo, lock during scan
         lock = repo.lock()
         try:
             repo.ui.debug('scanning\n')
             for name, ename, size in repo.store.walk():
                 entries.append((name, size))
                 total_bytes += size
         finally:
             lock.release()
     except error.LockError:
         return '2\n' # error: 2

     def streamer(repo, entries, total):
         '''stream out all metadata files in repository.'''
         yield '0\n' # success
         repo.ui.debug('%d files, %d bytes to transfer\n' %
                       (len(entries), total_bytes))
         yield '%d %d\n' % (len(entries), total_bytes)
         for name, size in entries:
             repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
             # partially encode name over the wire for backwards compat
             yield '%s\0%d\n' % (store.encodedir(name), size)
             for chunk in util.filechunkiter(repo.sopener(name), limit=size):
                 yield chunk

     return streamres(streamer(repo, entries, total_bytes))

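Putting the docstring and streamer() together, a successful reply for a hypothetical two-file store looks like this on the wire, where <NUL> marks the null byte and raw file bytes follow each header line:

    0
    2 4096
    data/foo.i<NUL>4000
    <4000 raw bytes of data/foo.i>
    00manifest.i<NUL>96
    <96 raw bytes of 00manifest.i>

The leading 0 is the success code; a lone 1 means the server forbids streaming and a lone 2 means it could not take the repository lock, matching the error handling in localrepository.stream_in() above.
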
 def unbundle(repo, proto, heads):
     their_heads = decodelist(heads)

     def check_heads():
         heads = repo.heads()
         return their_heads == ['force'] or their_heads == heads

     # fail early if possible
     if not check_heads():
         return 'unsynced changes'

     # write bundle data to temporary file because it can be big
     fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
     fp = os.fdopen(fd, 'wb+')
     r = 0
     proto.redirect()
     try:
         proto.getfile(fp)
         lock = repo.lock()
         try:
             if not check_heads():
                 # someone else committed/pushed/unbundled while we
                 # were transferring data
                 return 'unsynced changes'

             # push can proceed
             fp.seek(0)
             gen = changegroupmod.readbundle(fp, None)

             try:
                 r = repo.addchangegroup(gen, 'serve', proto._client(),
                                         lock=lock)
             except util.Abort, inst:
                 sys.stderr.write("abort: %s\n" % inst)
         finally:
             lock.release()
         return pushres(r)

     finally:
         fp.close()
         os.unlink(tempname)

 commands = {
     'between': (between, 'pairs'),
     'branchmap': (branchmap, ''),
     'branches': (branches, 'nodes'),
     'capabilities': (capabilities, ''),
     'changegroup': (changegroup, 'roots'),
     'changegroupsubset': (changegroupsubset, 'bases heads'),
     'heads': (heads, ''),
     'hello': (hello, ''),
     'listkeys': (listkeys, 'namespace'),
     'lookup': (lookup, 'key'),
     'pushkey': (pushkey, 'namespace key old new'),
     'stream_out': (stream, ''),
     'unbundle': (unbundle, 'heads'),
 }