##// END OF EJS Templates
localrepo: add auditor attribute which knows about subrepos
Author: Martin Geisler
r12162:af8c4929 default
parent child Browse files
Show More
@@ -1,1805 +1,1844
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.auditor = util.path_auditor(self.root, self._checknested)
31 self.opener = util.opener(self.path)
32 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
33 self.wopener = util.opener(self.root)
33 self.baseui = baseui
34 self.baseui = baseui
34 self.ui = baseui.copy()
35 self.ui = baseui.copy()
35
36
36 try:
37 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
39 extensions.loadall(self.ui)
39 except IOError:
40 except IOError:
40 pass
41 pass
41
42
42 if not os.path.isdir(self.path):
43 if not os.path.isdir(self.path):
43 if create:
44 if create:
44 if not os.path.exists(path):
45 if not os.path.exists(path):
45 util.makedirs(path)
46 util.makedirs(path)
46 os.mkdir(self.path)
47 os.mkdir(self.path)
47 requirements = ["revlogv1"]
48 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
49 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
50 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
51 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
52 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
53 requirements.append("fncache")
53 # create an invalid changelog
54 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
55 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
56 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
57 ' dummy changelog to prevent using the old repo layout'
57 )
58 )
58 if self.ui.configbool('format', 'parentdelta', False):
59 if self.ui.configbool('format', 'parentdelta', False):
59 requirements.append("parentdelta")
60 requirements.append("parentdelta")
60 reqfile = self.opener("requires", "w")
61 reqfile = self.opener("requires", "w")
61 for r in requirements:
62 for r in requirements:
62 reqfile.write("%s\n" % r)
63 reqfile.write("%s\n" % r)
63 reqfile.close()
64 reqfile.close()
64 else:
65 else:
65 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
66 elif create:
67 elif create:
67 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
68 else:
69 else:
69 # find requirements
70 # find requirements
70 requirements = set()
71 requirements = set()
71 try:
72 try:
72 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
73 except IOError, inst:
74 except IOError, inst:
74 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
75 raise
76 raise
76 for r in requirements - self.supported:
77 for r in requirements - self.supported:
77 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78
79
79 self.sharedpath = self.path
80 self.sharedpath = self.path
80 try:
81 try:
81 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
82 if not os.path.exists(s):
83 if not os.path.exists(s):
83 raise error.RepoError(
84 raise error.RepoError(
84 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 self.sharedpath = s
86 self.sharedpath = s
86 except IOError, inst:
87 except IOError, inst:
87 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
88 raise
89 raise
89
90
90 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.spath = self.store.path
92 self.spath = self.store.path
92 self.sopener = self.store.opener
93 self.sopener = self.store.opener
93 self.sjoin = self.store.join
94 self.sjoin = self.store.join
94 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
95 self.sopener.options = {}
96 self.sopener.options = {}
96 if 'parentdelta' in requirements:
97 if 'parentdelta' in requirements:
97 self.sopener.options['parentdelta'] = 1
98 self.sopener.options['parentdelta'] = 1
98
99
99 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
100 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
102 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
103 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
104 self._tags = None
105 self._tags = None
105 self._tagtypes = None
106 self._tagtypes = None
106
107
107 self._branchcache = None # in UTF-8
108 self._branchcache = None # in UTF-8
108 self._branchcachetip = None
109 self._branchcachetip = None
109 self.nodetagscache = None
110 self.nodetagscache = None
110 self.filterpats = {}
111 self.filterpats = {}
111 self._datafilters = {}
112 self._datafilters = {}
112 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
113
114
115 def _checknested(self, path):
116 """Determine if path is a legal nested repository."""
117 if not path.startswith(self.root):
118 return False
119 subpath = path[len(self.root) + 1:]
120
121 # XXX: Checking against the current working copy is wrong in
122 # the sense that it can reject things like
123 #
124 # $ hg cat -r 10 sub/x.txt
125 #
126 # if sub/ is no longer a subrepository in the working copy
127 # parent revision.
128 #
129 # However, it can of course also allow things that would have
130 # been rejected before, such as the above cat command if sub/
131 # is a subrepository now, but was a normal directory before.
132 # The old path auditor would have rejected by mistake since it
133 # panics when it sees sub/.hg/.
134 #
135 # All in all, checking against the working copy parent
136 # revision seems sensible since we want to prevent access to
137 # nested repositories on the filesystem *now*.
138 ctx = self['.']
139 parts = util.splitpath(subpath)
140 while parts:
141 prefix = os.sep.join(parts)
142 if prefix in ctx.substate:
143 if prefix == subpath:
144 return True
145 else:
146 sub = ctx.sub(prefix)
147 return sub.checknested(subpath[len(prefix) + 1:])
148 else:
149 parts.pop()
150 return False
151
152
114 @propertycache
153 @propertycache
115 def changelog(self):
154 def changelog(self):
116 c = changelog.changelog(self.sopener)
155 c = changelog.changelog(self.sopener)
117 if 'HG_PENDING' in os.environ:
156 if 'HG_PENDING' in os.environ:
118 p = os.environ['HG_PENDING']
157 p = os.environ['HG_PENDING']
119 if p.startswith(self.root):
158 if p.startswith(self.root):
120 c.readpending('00changelog.i.a')
159 c.readpending('00changelog.i.a')
121 self.sopener.options['defversion'] = c.version
160 self.sopener.options['defversion'] = c.version
122 return c
161 return c
123
162
124 @propertycache
163 @propertycache
125 def manifest(self):
164 def manifest(self):
126 return manifest.manifest(self.sopener)
165 return manifest.manifest(self.sopener)
127
166
128 @propertycache
167 @propertycache
129 def dirstate(self):
168 def dirstate(self):
130 return dirstate.dirstate(self.opener, self.ui, self.root)
169 return dirstate.dirstate(self.opener, self.ui, self.root)
131
170
132 def __getitem__(self, changeid):
171 def __getitem__(self, changeid):
133 if changeid is None:
172 if changeid is None:
134 return context.workingctx(self)
173 return context.workingctx(self)
135 return context.changectx(self, changeid)
174 return context.changectx(self, changeid)
136
175
137 def __contains__(self, changeid):
176 def __contains__(self, changeid):
138 try:
177 try:
139 return bool(self.lookup(changeid))
178 return bool(self.lookup(changeid))
140 except error.RepoLookupError:
179 except error.RepoLookupError:
141 return False
180 return False
142
181
143 def __nonzero__(self):
182 def __nonzero__(self):
144 return True
183 return True
145
184
146 def __len__(self):
185 def __len__(self):
147 return len(self.changelog)
186 return len(self.changelog)
148
187
149 def __iter__(self):
188 def __iter__(self):
150 for i in xrange(len(self)):
189 for i in xrange(len(self)):
151 yield i
190 yield i
152
191
153 def url(self):
192 def url(self):
154 return 'file:' + self.root
193 return 'file:' + self.root
155
194
156 def hook(self, name, throw=False, **args):
195 def hook(self, name, throw=False, **args):
157 return hook.hook(self.ui, self, name, throw, **args)
196 return hook.hook(self.ui, self, name, throw, **args)
158
197
159 tag_disallowed = ':\r\n'
198 tag_disallowed = ':\r\n'
160
199
161 def _tag(self, names, node, message, local, user, date, extra={}):
200 def _tag(self, names, node, message, local, user, date, extra={}):
162 if isinstance(names, str):
201 if isinstance(names, str):
163 allchars = names
202 allchars = names
164 names = (names,)
203 names = (names,)
165 else:
204 else:
166 allchars = ''.join(names)
205 allchars = ''.join(names)
167 for c in self.tag_disallowed:
206 for c in self.tag_disallowed:
168 if c in allchars:
207 if c in allchars:
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
208 raise util.Abort(_('%r cannot be used in a tag name') % c)
170
209
171 branches = self.branchmap()
210 branches = self.branchmap()
172 for name in names:
211 for name in names:
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
212 self.hook('pretag', throw=True, node=hex(node), tag=name,
174 local=local)
213 local=local)
175 if name in branches:
214 if name in branches:
176 self.ui.warn(_("warning: tag %s conflicts with existing"
215 self.ui.warn(_("warning: tag %s conflicts with existing"
177 " branch name\n") % name)
216 " branch name\n") % name)
178
217
179 def writetags(fp, names, munge, prevtags):
218 def writetags(fp, names, munge, prevtags):
180 fp.seek(0, 2)
219 fp.seek(0, 2)
181 if prevtags and prevtags[-1] != '\n':
220 if prevtags and prevtags[-1] != '\n':
182 fp.write('\n')
221 fp.write('\n')
183 for name in names:
222 for name in names:
184 m = munge and munge(name) or name
223 m = munge and munge(name) or name
185 if self._tagtypes and name in self._tagtypes:
224 if self._tagtypes and name in self._tagtypes:
186 old = self._tags.get(name, nullid)
225 old = self._tags.get(name, nullid)
187 fp.write('%s %s\n' % (hex(old), m))
226 fp.write('%s %s\n' % (hex(old), m))
188 fp.write('%s %s\n' % (hex(node), m))
227 fp.write('%s %s\n' % (hex(node), m))
189 fp.close()
228 fp.close()
190
229
191 prevtags = ''
230 prevtags = ''
192 if local:
231 if local:
193 try:
232 try:
194 fp = self.opener('localtags', 'r+')
233 fp = self.opener('localtags', 'r+')
195 except IOError:
234 except IOError:
196 fp = self.opener('localtags', 'a')
235 fp = self.opener('localtags', 'a')
197 else:
236 else:
198 prevtags = fp.read()
237 prevtags = fp.read()
199
238
200 # local tags are stored in the current charset
239 # local tags are stored in the current charset
201 writetags(fp, names, None, prevtags)
240 writetags(fp, names, None, prevtags)
202 for name in names:
241 for name in names:
203 self.hook('tag', node=hex(node), tag=name, local=local)
242 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
243 return
205
244
206 try:
245 try:
207 fp = self.wfile('.hgtags', 'rb+')
246 fp = self.wfile('.hgtags', 'rb+')
208 except IOError:
247 except IOError:
209 fp = self.wfile('.hgtags', 'ab')
248 fp = self.wfile('.hgtags', 'ab')
210 else:
249 else:
211 prevtags = fp.read()
250 prevtags = fp.read()
212
251
213 # committed tags are stored in UTF-8
252 # committed tags are stored in UTF-8
214 writetags(fp, names, encoding.fromlocal, prevtags)
253 writetags(fp, names, encoding.fromlocal, prevtags)
215
254
216 if '.hgtags' not in self.dirstate:
255 if '.hgtags' not in self.dirstate:
217 self[None].add(['.hgtags'])
256 self[None].add(['.hgtags'])
218
257
219 m = matchmod.exact(self.root, '', ['.hgtags'])
258 m = matchmod.exact(self.root, '', ['.hgtags'])
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
259 tagnode = self.commit(message, user, date, extra=extra, match=m)
221
260
222 for name in names:
261 for name in names:
223 self.hook('tag', node=hex(node), tag=name, local=local)
262 self.hook('tag', node=hex(node), tag=name, local=local)
224
263
225 return tagnode
264 return tagnode
226
265
227 def tag(self, names, node, message, local, user, date):
266 def tag(self, names, node, message, local, user, date):
228 '''tag a revision with one or more symbolic names.
267 '''tag a revision with one or more symbolic names.
229
268
230 names is a list of strings or, when adding a single tag, names may be a
269 names is a list of strings or, when adding a single tag, names may be a
231 string.
270 string.
232
271
233 if local is True, the tags are stored in a per-repository file.
272 if local is True, the tags are stored in a per-repository file.
234 otherwise, they are stored in the .hgtags file, and a new
273 otherwise, they are stored in the .hgtags file, and a new
235 changeset is committed with the change.
274 changeset is committed with the change.
236
275
237 keyword arguments:
276 keyword arguments:
238
277
239 local: whether to store tags in non-version-controlled file
278 local: whether to store tags in non-version-controlled file
240 (default False)
279 (default False)
241
280
242 message: commit message to use if committing
281 message: commit message to use if committing
243
282
244 user: name of user to use if committing
283 user: name of user to use if committing
245
284
246 date: date tuple to use if committing'''
285 date: date tuple to use if committing'''
247
286
248 for x in self.status()[:5]:
287 for x in self.status()[:5]:
249 if '.hgtags' in x:
288 if '.hgtags' in x:
250 raise util.Abort(_('working copy of .hgtags is changed '
289 raise util.Abort(_('working copy of .hgtags is changed '
251 '(please commit .hgtags manually)'))
290 '(please commit .hgtags manually)'))
252
291
253 self.tags() # instantiate the cache
292 self.tags() # instantiate the cache
254 self._tag(names, node, message, local, user, date)
293 self._tag(names, node, message, local, user, date)
255
294
256 def tags(self):
295 def tags(self):
257 '''return a mapping of tag to node'''
296 '''return a mapping of tag to node'''
258 if self._tags is None:
297 if self._tags is None:
259 (self._tags, self._tagtypes) = self._findtags()
298 (self._tags, self._tagtypes) = self._findtags()
260
299
261 return self._tags
300 return self._tags
262
301
263 def _findtags(self):
302 def _findtags(self):
264 '''Do the hard work of finding tags. Return a pair of dicts
303 '''Do the hard work of finding tags. Return a pair of dicts
265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
304 (tags, tagtypes) where tags maps tag name to node, and tagtypes
266 maps tag name to a string like \'global\' or \'local\'.
305 maps tag name to a string like \'global\' or \'local\'.
267 Subclasses or extensions are free to add their own tags, but
306 Subclasses or extensions are free to add their own tags, but
268 should be aware that the returned dicts will be retained for the
307 should be aware that the returned dicts will be retained for the
269 duration of the localrepo object.'''
308 duration of the localrepo object.'''
270
309
271 # XXX what tagtype should subclasses/extensions use? Currently
310 # XXX what tagtype should subclasses/extensions use? Currently
272 # mq and bookmarks add tags, but do not set the tagtype at all.
311 # mq and bookmarks add tags, but do not set the tagtype at all.
273 # Should each extension invent its own tag type? Should there
312 # Should each extension invent its own tag type? Should there
274 # be one tagtype for all such "virtual" tags? Or is the status
313 # be one tagtype for all such "virtual" tags? Or is the status
275 # quo fine?
314 # quo fine?
276
315
277 alltags = {} # map tag name to (node, hist)
316 alltags = {} # map tag name to (node, hist)
278 tagtypes = {}
317 tagtypes = {}
279
318
280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
319 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
320 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
282
321
283 # Build the return dicts. Have to re-encode tag names because
322 # Build the return dicts. Have to re-encode tag names because
284 # the tags module always uses UTF-8 (in order not to lose info
323 # the tags module always uses UTF-8 (in order not to lose info
285 # writing to the cache), but the rest of Mercurial wants them in
324 # writing to the cache), but the rest of Mercurial wants them in
286 # local encoding.
325 # local encoding.
287 tags = {}
326 tags = {}
288 for (name, (node, hist)) in alltags.iteritems():
327 for (name, (node, hist)) in alltags.iteritems():
289 if node != nullid:
328 if node != nullid:
290 tags[encoding.tolocal(name)] = node
329 tags[encoding.tolocal(name)] = node
291 tags['tip'] = self.changelog.tip()
330 tags['tip'] = self.changelog.tip()
292 tagtypes = dict([(encoding.tolocal(name), value)
331 tagtypes = dict([(encoding.tolocal(name), value)
293 for (name, value) in tagtypes.iteritems()])
332 for (name, value) in tagtypes.iteritems()])
294 return (tags, tagtypes)
333 return (tags, tagtypes)
295
334
296 def tagtype(self, tagname):
335 def tagtype(self, tagname):
297 '''
336 '''
298 return the type of the given tag. result can be:
337 return the type of the given tag. result can be:
299
338
300 'local' : a local tag
339 'local' : a local tag
301 'global' : a global tag
340 'global' : a global tag
302 None : tag does not exist
341 None : tag does not exist
303 '''
342 '''
304
343
305 self.tags()
344 self.tags()
306
345
307 return self._tagtypes.get(tagname)
346 return self._tagtypes.get(tagname)
308
347
309 def tagslist(self):
348 def tagslist(self):
310 '''return a list of tags ordered by revision'''
349 '''return a list of tags ordered by revision'''
311 l = []
350 l = []
312 for t, n in self.tags().iteritems():
351 for t, n in self.tags().iteritems():
313 try:
352 try:
314 r = self.changelog.rev(n)
353 r = self.changelog.rev(n)
315 except:
354 except:
316 r = -2 # sort to the beginning of the list if unknown
355 r = -2 # sort to the beginning of the list if unknown
317 l.append((r, t, n))
356 l.append((r, t, n))
318 return [(t, n) for r, t, n in sorted(l)]
357 return [(t, n) for r, t, n in sorted(l)]
319
358
320 def nodetags(self, node):
359 def nodetags(self, node):
321 '''return the tags associated with a node'''
360 '''return the tags associated with a node'''
322 if not self.nodetagscache:
361 if not self.nodetagscache:
323 self.nodetagscache = {}
362 self.nodetagscache = {}
324 for t, n in self.tags().iteritems():
363 for t, n in self.tags().iteritems():
325 self.nodetagscache.setdefault(n, []).append(t)
364 self.nodetagscache.setdefault(n, []).append(t)
326 for tags in self.nodetagscache.itervalues():
365 for tags in self.nodetagscache.itervalues():
327 tags.sort()
366 tags.sort()
328 return self.nodetagscache.get(node, [])
367 return self.nodetagscache.get(node, [])
329
368
330 def _branchtags(self, partial, lrev):
369 def _branchtags(self, partial, lrev):
331 # TODO: rename this function?
370 # TODO: rename this function?
332 tiprev = len(self) - 1
371 tiprev = len(self) - 1
333 if lrev != tiprev:
372 if lrev != tiprev:
334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
373 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
335 self._updatebranchcache(partial, ctxgen)
374 self._updatebranchcache(partial, ctxgen)
336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
337
376
338 return partial
377 return partial
339
378
340 def updatebranchcache(self):
379 def updatebranchcache(self):
341 tip = self.changelog.tip()
380 tip = self.changelog.tip()
342 if self._branchcache is not None and self._branchcachetip == tip:
381 if self._branchcache is not None and self._branchcachetip == tip:
343 return self._branchcache
382 return self._branchcache
344
383
345 oldtip = self._branchcachetip
384 oldtip = self._branchcachetip
346 self._branchcachetip = tip
385 self._branchcachetip = tip
347 if oldtip is None or oldtip not in self.changelog.nodemap:
386 if oldtip is None or oldtip not in self.changelog.nodemap:
348 partial, last, lrev = self._readbranchcache()
387 partial, last, lrev = self._readbranchcache()
349 else:
388 else:
350 lrev = self.changelog.rev(oldtip)
389 lrev = self.changelog.rev(oldtip)
351 partial = self._branchcache
390 partial = self._branchcache
352
391
353 self._branchtags(partial, lrev)
392 self._branchtags(partial, lrev)
354 # this private cache holds all heads (not just tips)
393 # this private cache holds all heads (not just tips)
355 self._branchcache = partial
394 self._branchcache = partial
356
395
357 def branchmap(self):
396 def branchmap(self):
358 '''returns a dictionary {branch: [branchheads]}'''
397 '''returns a dictionary {branch: [branchheads]}'''
359 self.updatebranchcache()
398 self.updatebranchcache()
360 return self._branchcache
399 return self._branchcache
361
400
362 def branchtags(self):
401 def branchtags(self):
363 '''return a dict where branch names map to the tipmost head of
402 '''return a dict where branch names map to the tipmost head of
364 the branch, open heads come before closed'''
403 the branch, open heads come before closed'''
365 bt = {}
404 bt = {}
366 for bn, heads in self.branchmap().iteritems():
405 for bn, heads in self.branchmap().iteritems():
367 tip = heads[-1]
406 tip = heads[-1]
368 for h in reversed(heads):
407 for h in reversed(heads):
369 if 'close' not in self.changelog.read(h)[5]:
408 if 'close' not in self.changelog.read(h)[5]:
370 tip = h
409 tip = h
371 break
410 break
372 bt[bn] = tip
411 bt[bn] = tip
373 return bt
412 return bt
374
413
375
414
376 def _readbranchcache(self):
415 def _readbranchcache(self):
377 partial = {}
416 partial = {}
378 try:
417 try:
379 f = self.opener("branchheads.cache")
418 f = self.opener("branchheads.cache")
380 lines = f.read().split('\n')
419 lines = f.read().split('\n')
381 f.close()
420 f.close()
382 except (IOError, OSError):
421 except (IOError, OSError):
383 return {}, nullid, nullrev
422 return {}, nullid, nullrev
384
423
385 try:
424 try:
386 last, lrev = lines.pop(0).split(" ", 1)
425 last, lrev = lines.pop(0).split(" ", 1)
387 last, lrev = bin(last), int(lrev)
426 last, lrev = bin(last), int(lrev)
388 if lrev >= len(self) or self[lrev].node() != last:
427 if lrev >= len(self) or self[lrev].node() != last:
389 # invalidate the cache
428 # invalidate the cache
390 raise ValueError('invalidating branch cache (tip differs)')
429 raise ValueError('invalidating branch cache (tip differs)')
391 for l in lines:
430 for l in lines:
392 if not l:
431 if not l:
393 continue
432 continue
394 node, label = l.split(" ", 1)
433 node, label = l.split(" ", 1)
395 partial.setdefault(label.strip(), []).append(bin(node))
434 partial.setdefault(label.strip(), []).append(bin(node))
396 except KeyboardInterrupt:
435 except KeyboardInterrupt:
397 raise
436 raise
398 except Exception, inst:
437 except Exception, inst:
399 if self.ui.debugflag:
438 if self.ui.debugflag:
400 self.ui.warn(str(inst), '\n')
439 self.ui.warn(str(inst), '\n')
401 partial, last, lrev = {}, nullid, nullrev
440 partial, last, lrev = {}, nullid, nullrev
402 return partial, last, lrev
441 return partial, last, lrev
403
442
404 def _writebranchcache(self, branches, tip, tiprev):
443 def _writebranchcache(self, branches, tip, tiprev):
405 try:
444 try:
406 f = self.opener("branchheads.cache", "w", atomictemp=True)
445 f = self.opener("branchheads.cache", "w", atomictemp=True)
407 f.write("%s %s\n" % (hex(tip), tiprev))
446 f.write("%s %s\n" % (hex(tip), tiprev))
408 for label, nodes in branches.iteritems():
447 for label, nodes in branches.iteritems():
409 for node in nodes:
448 for node in nodes:
410 f.write("%s %s\n" % (hex(node), label))
449 f.write("%s %s\n" % (hex(node), label))
411 f.rename()
450 f.rename()
412 except (IOError, OSError):
451 except (IOError, OSError):
413 pass
452 pass
414
453
415 def _updatebranchcache(self, partial, ctxgen):
454 def _updatebranchcache(self, partial, ctxgen):
416 # collect new branch entries
455 # collect new branch entries
417 newbranches = {}
456 newbranches = {}
418 for c in ctxgen:
457 for c in ctxgen:
419 newbranches.setdefault(c.branch(), []).append(c.node())
458 newbranches.setdefault(c.branch(), []).append(c.node())
420 # if older branchheads are reachable from new ones, they aren't
459 # if older branchheads are reachable from new ones, they aren't
421 # really branchheads. Note checking parents is insufficient:
460 # really branchheads. Note checking parents is insufficient:
422 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
461 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
423 for branch, newnodes in newbranches.iteritems():
462 for branch, newnodes in newbranches.iteritems():
424 bheads = partial.setdefault(branch, [])
463 bheads = partial.setdefault(branch, [])
425 bheads.extend(newnodes)
464 bheads.extend(newnodes)
426 if len(bheads) <= 1:
465 if len(bheads) <= 1:
427 continue
466 continue
428 # starting from tip means fewer passes over reachable
467 # starting from tip means fewer passes over reachable
429 while newnodes:
468 while newnodes:
430 latest = newnodes.pop()
469 latest = newnodes.pop()
431 if latest not in bheads:
470 if latest not in bheads:
432 continue
471 continue
433 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
472 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
434 reachable = self.changelog.reachable(latest, minbhrev)
473 reachable = self.changelog.reachable(latest, minbhrev)
435 reachable.remove(latest)
474 reachable.remove(latest)
436 bheads = [b for b in bheads if b not in reachable]
475 bheads = [b for b in bheads if b not in reachable]
437 partial[branch] = bheads
476 partial[branch] = bheads
438
477
439 def lookup(self, key):
478 def lookup(self, key):
440 if isinstance(key, int):
479 if isinstance(key, int):
441 return self.changelog.node(key)
480 return self.changelog.node(key)
442 elif key == '.':
481 elif key == '.':
443 return self.dirstate.parents()[0]
482 return self.dirstate.parents()[0]
444 elif key == 'null':
483 elif key == 'null':
445 return nullid
484 return nullid
446 elif key == 'tip':
485 elif key == 'tip':
447 return self.changelog.tip()
486 return self.changelog.tip()
448 n = self.changelog._match(key)
487 n = self.changelog._match(key)
449 if n:
488 if n:
450 return n
489 return n
451 if key in self.tags():
490 if key in self.tags():
452 return self.tags()[key]
491 return self.tags()[key]
453 if key in self.branchtags():
492 if key in self.branchtags():
454 return self.branchtags()[key]
493 return self.branchtags()[key]
455 n = self.changelog._partialmatch(key)
494 n = self.changelog._partialmatch(key)
456 if n:
495 if n:
457 return n
496 return n
458
497
459 # can't find key, check if it might have come from damaged dirstate
498 # can't find key, check if it might have come from damaged dirstate
460 if key in self.dirstate.parents():
499 if key in self.dirstate.parents():
461 raise error.Abort(_("working directory has unknown parent '%s'!")
500 raise error.Abort(_("working directory has unknown parent '%s'!")
462 % short(key))
501 % short(key))
463 try:
502 try:
464 if len(key) == 20:
503 if len(key) == 20:
465 key = hex(key)
504 key = hex(key)
466 except:
505 except:
467 pass
506 pass
468 raise error.RepoLookupError(_("unknown revision '%s'") % key)
507 raise error.RepoLookupError(_("unknown revision '%s'") % key)
469
508
470 def lookupbranch(self, key, remote=None):
509 def lookupbranch(self, key, remote=None):
471 repo = remote or self
510 repo = remote or self
472 if key in repo.branchmap():
511 if key in repo.branchmap():
473 return key
512 return key
474
513
475 repo = (remote and remote.local()) and remote or self
514 repo = (remote and remote.local()) and remote or self
476 return repo[key].branch()
515 return repo[key].branch()
477
516
478 def local(self):
517 def local(self):
479 return True
518 return True
480
519
481 def join(self, f):
520 def join(self, f):
482 return os.path.join(self.path, f)
521 return os.path.join(self.path, f)
483
522
484 def wjoin(self, f):
523 def wjoin(self, f):
485 return os.path.join(self.root, f)
524 return os.path.join(self.root, f)
486
525
487 def file(self, f):
526 def file(self, f):
488 if f[0] == '/':
527 if f[0] == '/':
489 f = f[1:]
528 f = f[1:]
490 return filelog.filelog(self.sopener, f)
529 return filelog.filelog(self.sopener, f)
491
530
492 def changectx(self, changeid):
531 def changectx(self, changeid):
493 return self[changeid]
532 return self[changeid]
494
533
495 def parents(self, changeid=None):
534 def parents(self, changeid=None):
496 '''get list of changectxs for parents of changeid'''
535 '''get list of changectxs for parents of changeid'''
497 return self[changeid].parents()
536 return self[changeid].parents()
498
537
499 def filectx(self, path, changeid=None, fileid=None):
538 def filectx(self, path, changeid=None, fileid=None):
500 """changeid can be a changeset revision, node, or tag.
539 """changeid can be a changeset revision, node, or tag.
501 fileid can be a file revision or node."""
540 fileid can be a file revision or node."""
502 return context.filectx(self, path, changeid, fileid)
541 return context.filectx(self, path, changeid, fileid)
503
542
504 def getcwd(self):
543 def getcwd(self):
505 return self.dirstate.getcwd()
544 return self.dirstate.getcwd()
506
545
507 def pathto(self, f, cwd=None):
546 def pathto(self, f, cwd=None):
508 return self.dirstate.pathto(f, cwd)
547 return self.dirstate.pathto(f, cwd)
509
548
510 def wfile(self, f, mode='r'):
549 def wfile(self, f, mode='r'):
511 return self.wopener(f, mode)
550 return self.wopener(f, mode)
512
551
513 def _link(self, f):
552 def _link(self, f):
514 return os.path.islink(self.wjoin(f))
553 return os.path.islink(self.wjoin(f))
515
554
516 def _loadfilter(self, filter):
555 def _loadfilter(self, filter):
517 if filter not in self.filterpats:
556 if filter not in self.filterpats:
518 l = []
557 l = []
519 for pat, cmd in self.ui.configitems(filter):
558 for pat, cmd in self.ui.configitems(filter):
520 if cmd == '!':
559 if cmd == '!':
521 continue
560 continue
522 mf = matchmod.match(self.root, '', [pat])
561 mf = matchmod.match(self.root, '', [pat])
523 fn = None
562 fn = None
524 params = cmd
563 params = cmd
525 for name, filterfn in self._datafilters.iteritems():
564 for name, filterfn in self._datafilters.iteritems():
526 if cmd.startswith(name):
565 if cmd.startswith(name):
527 fn = filterfn
566 fn = filterfn
528 params = cmd[len(name):].lstrip()
567 params = cmd[len(name):].lstrip()
529 break
568 break
530 if not fn:
569 if not fn:
531 fn = lambda s, c, **kwargs: util.filter(s, c)
570 fn = lambda s, c, **kwargs: util.filter(s, c)
532 # Wrap old filters not supporting keyword arguments
571 # Wrap old filters not supporting keyword arguments
533 if not inspect.getargspec(fn)[2]:
572 if not inspect.getargspec(fn)[2]:
534 oldfn = fn
573 oldfn = fn
535 fn = lambda s, c, **kwargs: oldfn(s, c)
574 fn = lambda s, c, **kwargs: oldfn(s, c)
536 l.append((mf, fn, params))
575 l.append((mf, fn, params))
537 self.filterpats[filter] = l
576 self.filterpats[filter] = l
538
577
539 def _filter(self, filter, filename, data):
578 def _filter(self, filter, filename, data):
540 self._loadfilter(filter)
579 self._loadfilter(filter)
541
580
542 for mf, fn, cmd in self.filterpats[filter]:
581 for mf, fn, cmd in self.filterpats[filter]:
543 if mf(filename):
582 if mf(filename):
544 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
545 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
546 break
585 break
547
586
548 return data
587 return data
549
588
550 def adddatafilter(self, name, filter):
589 def adddatafilter(self, name, filter):
551 self._datafilters[name] = filter
590 self._datafilters[name] = filter
552
591
553 def wread(self, filename):
592 def wread(self, filename):
554 if self._link(filename):
593 if self._link(filename):
555 data = os.readlink(self.wjoin(filename))
594 data = os.readlink(self.wjoin(filename))
556 else:
595 else:
557 data = self.wopener(filename, 'r').read()
596 data = self.wopener(filename, 'r').read()
558 return self._filter("encode", filename, data)
597 return self._filter("encode", filename, data)
559
598
560 def wwrite(self, filename, data, flags):
599 def wwrite(self, filename, data, flags):
561 data = self._filter("decode", filename, data)
600 data = self._filter("decode", filename, data)
562 try:
601 try:
563 os.unlink(self.wjoin(filename))
602 os.unlink(self.wjoin(filename))
564 except OSError:
603 except OSError:
565 pass
604 pass
566 if 'l' in flags:
605 if 'l' in flags:
567 self.wopener.symlink(data, filename)
606 self.wopener.symlink(data, filename)
568 else:
607 else:
569 self.wopener(filename, 'w').write(data)
608 self.wopener(filename, 'w').write(data)
570 if 'x' in flags:
609 if 'x' in flags:
571 util.set_flags(self.wjoin(filename), False, True)
610 util.set_flags(self.wjoin(filename), False, True)
572
611
573 def wwritedata(self, filename, data):
612 def wwritedata(self, filename, data):
574 return self._filter("decode", filename, data)
613 return self._filter("decode", filename, data)
575
614
    def transaction(self, desc):
        """Open a new store transaction described by desc and return it.

        If a transaction is already running, return a nested transaction
        instead.  Raises RepoError if an abandoned journal from an
        earlier interrupted transaction is still present.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet: record it as empty
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        # journal.desc: line 1 is the current revision count, line 2 is desc
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # journal.* files become undo.* files (via aftertrans) so that
        # rollback() can later restore the pre-transaction state
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # only a weak reference is kept; callers must hold the returned tr
        self._transref = weakref.ref(tr)
        return tr
605
644
606 def recover(self):
645 def recover(self):
607 lock = self.lock()
646 lock = self.lock()
608 try:
647 try:
609 if os.path.exists(self.sjoin("journal")):
648 if os.path.exists(self.sjoin("journal")):
610 self.ui.status(_("rolling back interrupted transaction\n"))
649 self.ui.status(_("rolling back interrupted transaction\n"))
611 transaction.rollback(self.sopener, self.sjoin("journal"),
650 transaction.rollback(self.sopener, self.sjoin("journal"),
612 self.ui.warn)
651 self.ui.warn)
613 self.invalidate()
652 self.invalidate()
614 return True
653 return True
615 else:
654 else:
616 self.ui.warn(_("no interrupted transaction available\n"))
655 self.ui.warn(_("no interrupted transaction available\n"))
617 return False
656 return False
618 finally:
657 finally:
619 lock.release()
658 lock.release()
620
659
    def rollback(self, dryrun=False):
        """Undo the most recent transaction using the undo.* files.

        Restores the store, dirstate and branch to their pre-transaction
        state and invalidates in-memory caches.  With dryrun=True, only
        report what would be undone.  Returns 1 (after a warning) when
        no rollback information is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc: line 1 is the revision count before the
                    # transaction, the following lines describe it
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop cached state and notify that history was destroyed
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
659
698
660 def invalidatecaches(self):
699 def invalidatecaches(self):
661 self._tags = None
700 self._tags = None
662 self._tagtypes = None
701 self._tagtypes = None
663 self.nodetagscache = None
702 self.nodetagscache = None
664 self._branchcache = None # in UTF-8
703 self._branchcache = None # in UTF-8
665 self._branchcachetip = None
704 self._branchcachetip = None
666
705
667 def invalidate(self):
706 def invalidate(self):
668 for a in "changelog manifest".split():
707 for a in "changelog manifest".split():
669 if a in self.__dict__:
708 if a in self.__dict__:
670 delattr(self, a)
709 delattr(self, a)
671 self.invalidatecaches()
710 self.invalidatecaches()
672
711
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the file lock lockname and return the lock object.

        With wait=False, a held lock raises LockHeld immediately; with
        wait=True, warn about the holder and retry using the ui.timeout
        configuration.  releasefn is passed to the lock for release
        time; acquirefn, if not None, runs right after acquisition.
        desc is used in user-visible messages.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
687
726
688 def lock(self, wait=True):
727 def lock(self, wait=True):
689 '''Lock the repository store (.hg/store) and return a weak reference
728 '''Lock the repository store (.hg/store) and return a weak reference
690 to the lock. Use this before modifying the store (e.g. committing or
729 to the lock. Use this before modifying the store (e.g. committing or
691 stripping). If you are opening a transaction, get a lock as well.)'''
730 stripping). If you are opening a transaction, get a lock as well.)'''
692 l = self._lockref and self._lockref()
731 l = self._lockref and self._lockref()
693 if l is not None and l.held:
732 if l is not None and l.held:
694 l.lock()
733 l.lock()
695 return l
734 return l
696
735
697 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
736 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
698 _('repository %s') % self.origroot)
737 _('repository %s') % self.origroot)
699 self._lockref = weakref.ref(l)
738 self._lockref = weakref.ref(l)
700 return l
739 return l
701
740
702 def wlock(self, wait=True):
741 def wlock(self, wait=True):
703 '''Lock the non-store parts of the repository (everything under
742 '''Lock the non-store parts of the repository (everything under
704 .hg except .hg/store) and return a weak reference to the lock.
743 .hg except .hg/store) and return a weak reference to the lock.
705 Use this before modifying files in .hg.'''
744 Use this before modifying files in .hg.'''
706 l = self._wlockref and self._wlockref()
745 l = self._wlockref and self._wlockref()
707 if l is not None and l.held:
746 if l is not None and l.held:
708 l.lock()
747 l.lock()
709 return l
748 return l
710
749
711 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
750 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
712 self.dirstate.invalidate, _('working directory of %s') %
751 self.dirstate.invalidate, _('working directory of %s') %
713 self.origroot)
752 self.origroot)
714 self._wlockref = weakref.ref(l)
753 self._wlockref = weakref.ref(l)
715 return l
754 return l
716
755
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """Commit an individual file as part of a larger transaction.

        fctx supplies the file's path, data and copy/flags metadata;
        manifest1/manifest2 are the manifests of the commit's parents
        and linkrev the changelog revision the new filelog entry will
        link to.  fname is appended to changelist when a new filelog
        revision is written, or when only the flags changed during a
        merge.  Returns the filelog node to record in the manifest (a
        newly added node, or the reused fparent1).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
791
830
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit.  Raises util.Abort for partial merge
        commits, unresolved merge conflicts, or explicitly named files
        that cannot be committed.
        """

        # raised for explicitly named files that cannot be committed
        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (and not a branch/close change): bail out
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            # refuse to commit files still marked unresolved by a merge
            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # on any failure, point the user at the saved message
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # the "commit" hook runs outside the wlock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
919
958
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument (ctx).
        With error=True, an IOError while reading a file is fatal;
        otherwise a missing file (ENOENT) is recorded as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a proxy keeps the transaction collectable if we leak it
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report removals of files the parents actually had
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # callback so pretxncommit hooks can see the pending changelog
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
987
1026
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # NOTE(review): currently this only drops the in-memory caches;
        # the persistent tag cache update described above is not done
        # here -- confirm where that happens.
        self.invalidatecaches()
1006
1045
1007 def walk(self, match, node=None):
1046 def walk(self, match, node=None):
1008 '''
1047 '''
1009 walk recursively through the directory tree or a given
1048 walk recursively through the directory tree or a given
1010 changeset, finding all files matched by the match
1049 changeset, finding all files matched by the match
1011 function
1050 function
1012 '''
1051 '''
1013 return self[node].walk(match)
1052 return self[node].walk(match)
1014
1053
1015 def status(self, node1='.', node2=None, match=None,
1054 def status(self, node1='.', node2=None, match=None,
1016 ignored=False, clean=False, unknown=False):
1055 ignored=False, clean=False, unknown=False):
1017 """return status of files between two nodes or node and working directory
1056 """return status of files between two nodes or node and working directory
1018
1057
1019 If node1 is None, use the first dirstate parent instead.
1058 If node1 is None, use the first dirstate parent instead.
1020 If node2 is None, compare node1 with working directory.
1059 If node2 is None, compare node1 with working directory.
1021 """
1060 """
1022
1061
1023 def mfmatches(ctx):
1062 def mfmatches(ctx):
1024 mf = ctx.manifest().copy()
1063 mf = ctx.manifest().copy()
1025 for fn in mf.keys():
1064 for fn in mf.keys():
1026 if not match(fn):
1065 if not match(fn):
1027 del mf[fn]
1066 del mf[fn]
1028 return mf
1067 return mf
1029
1068
1030 if isinstance(node1, context.changectx):
1069 if isinstance(node1, context.changectx):
1031 ctx1 = node1
1070 ctx1 = node1
1032 else:
1071 else:
1033 ctx1 = self[node1]
1072 ctx1 = self[node1]
1034 if isinstance(node2, context.changectx):
1073 if isinstance(node2, context.changectx):
1035 ctx2 = node2
1074 ctx2 = node2
1036 else:
1075 else:
1037 ctx2 = self[node2]
1076 ctx2 = self[node2]
1038
1077
1039 working = ctx2.rev() is None
1078 working = ctx2.rev() is None
1040 parentworking = working and ctx1 == self['.']
1079 parentworking = working and ctx1 == self['.']
1041 match = match or matchmod.always(self.root, self.getcwd())
1080 match = match or matchmod.always(self.root, self.getcwd())
1042 listignored, listclean, listunknown = ignored, clean, unknown
1081 listignored, listclean, listunknown = ignored, clean, unknown
1043
1082
1044 # load earliest manifest first for caching reasons
1083 # load earliest manifest first for caching reasons
1045 if not working and ctx2.rev() < ctx1.rev():
1084 if not working and ctx2.rev() < ctx1.rev():
1046 ctx2.manifest()
1085 ctx2.manifest()
1047
1086
1048 if not parentworking:
1087 if not parentworking:
1049 def bad(f, msg):
1088 def bad(f, msg):
1050 if f not in ctx1:
1089 if f not in ctx1:
1051 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1090 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1052 match.bad = bad
1091 match.bad = bad
1053
1092
1054 if working: # we need to scan the working dir
1093 if working: # we need to scan the working dir
1055 subrepos = []
1094 subrepos = []
1056 if '.hgsub' in self.dirstate:
1095 if '.hgsub' in self.dirstate:
1057 subrepos = ctx1.substate.keys()
1096 subrepos = ctx1.substate.keys()
1058 s = self.dirstate.status(match, subrepos, listignored,
1097 s = self.dirstate.status(match, subrepos, listignored,
1059 listclean, listunknown)
1098 listclean, listunknown)
1060 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1099 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1061
1100
1062 # check for any possibly clean files
1101 # check for any possibly clean files
1063 if parentworking and cmp:
1102 if parentworking and cmp:
1064 fixup = []
1103 fixup = []
1065 # do a full compare of any files that might have changed
1104 # do a full compare of any files that might have changed
1066 for f in sorted(cmp):
1105 for f in sorted(cmp):
1067 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1106 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1068 or ctx1[f].cmp(ctx2[f])):
1107 or ctx1[f].cmp(ctx2[f])):
1069 modified.append(f)
1108 modified.append(f)
1070 else:
1109 else:
1071 fixup.append(f)
1110 fixup.append(f)
1072
1111
1073 # update dirstate for files that are actually clean
1112 # update dirstate for files that are actually clean
1074 if fixup:
1113 if fixup:
1075 if listclean:
1114 if listclean:
1076 clean += fixup
1115 clean += fixup
1077
1116
1078 try:
1117 try:
1079 # updating the dirstate is optional
1118 # updating the dirstate is optional
1080 # so we don't wait on the lock
1119 # so we don't wait on the lock
1081 wlock = self.wlock(False)
1120 wlock = self.wlock(False)
1082 try:
1121 try:
1083 for f in fixup:
1122 for f in fixup:
1084 self.dirstate.normal(f)
1123 self.dirstate.normal(f)
1085 finally:
1124 finally:
1086 wlock.release()
1125 wlock.release()
1087 except error.LockError:
1126 except error.LockError:
1088 pass
1127 pass
1089
1128
1090 if not parentworking:
1129 if not parentworking:
1091 mf1 = mfmatches(ctx1)
1130 mf1 = mfmatches(ctx1)
1092 if working:
1131 if working:
1093 # we are comparing working dir against non-parent
1132 # we are comparing working dir against non-parent
1094 # generate a pseudo-manifest for the working dir
1133 # generate a pseudo-manifest for the working dir
1095 mf2 = mfmatches(self['.'])
1134 mf2 = mfmatches(self['.'])
1096 for f in cmp + modified + added:
1135 for f in cmp + modified + added:
1097 mf2[f] = None
1136 mf2[f] = None
1098 mf2.set(f, ctx2.flags(f))
1137 mf2.set(f, ctx2.flags(f))
1099 for f in removed:
1138 for f in removed:
1100 if f in mf2:
1139 if f in mf2:
1101 del mf2[f]
1140 del mf2[f]
1102 else:
1141 else:
1103 # we are comparing two revisions
1142 # we are comparing two revisions
1104 deleted, unknown, ignored = [], [], []
1143 deleted, unknown, ignored = [], [], []
1105 mf2 = mfmatches(ctx2)
1144 mf2 = mfmatches(ctx2)
1106
1145
1107 modified, added, clean = [], [], []
1146 modified, added, clean = [], [], []
1108 for fn in mf2:
1147 for fn in mf2:
1109 if fn in mf1:
1148 if fn in mf1:
1110 if (mf1.flags(fn) != mf2.flags(fn) or
1149 if (mf1.flags(fn) != mf2.flags(fn) or
1111 (mf1[fn] != mf2[fn] and
1150 (mf1[fn] != mf2[fn] and
1112 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1151 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1113 modified.append(fn)
1152 modified.append(fn)
1114 elif listclean:
1153 elif listclean:
1115 clean.append(fn)
1154 clean.append(fn)
1116 del mf1[fn]
1155 del mf1[fn]
1117 else:
1156 else:
1118 added.append(fn)
1157 added.append(fn)
1119 removed = mf1.keys()
1158 removed = mf1.keys()
1120
1159
1121 r = modified, added, removed, deleted, unknown, ignored, clean
1160 r = modified, added, removed, deleted, unknown, ignored, clean
1122 [l.sort() for l in r]
1161 [l.sort() for l in r]
1123 return r
1162 return r
1124
1163
1125 def heads(self, start=None):
1164 def heads(self, start=None):
1126 heads = self.changelog.heads(start)
1165 heads = self.changelog.heads(start)
1127 # sort the output in rev descending order
1166 # sort the output in rev descending order
1128 heads = [(-self.changelog.rev(h), h) for h in heads]
1167 heads = [(-self.changelog.rev(h), h) for h in heads]
1129 return [n for (r, n) in sorted(heads)]
1168 return [n for (r, n) in sorted(heads)]
1130
1169
1131 def branchheads(self, branch=None, start=None, closed=False):
1170 def branchheads(self, branch=None, start=None, closed=False):
1132 '''return a (possibly filtered) list of heads for the given branch
1171 '''return a (possibly filtered) list of heads for the given branch
1133
1172
1134 Heads are returned in topological order, from newest to oldest.
1173 Heads are returned in topological order, from newest to oldest.
1135 If branch is None, use the dirstate branch.
1174 If branch is None, use the dirstate branch.
1136 If start is not None, return only heads reachable from start.
1175 If start is not None, return only heads reachable from start.
1137 If closed is True, return heads that are marked as closed as well.
1176 If closed is True, return heads that are marked as closed as well.
1138 '''
1177 '''
1139 if branch is None:
1178 if branch is None:
1140 branch = self[None].branch()
1179 branch = self[None].branch()
1141 branches = self.branchmap()
1180 branches = self.branchmap()
1142 if branch not in branches:
1181 if branch not in branches:
1143 return []
1182 return []
1144 # the cache returns heads ordered lowest to highest
1183 # the cache returns heads ordered lowest to highest
1145 bheads = list(reversed(branches[branch]))
1184 bheads = list(reversed(branches[branch]))
1146 if start is not None:
1185 if start is not None:
1147 # filter out the heads that cannot be reached from startrev
1186 # filter out the heads that cannot be reached from startrev
1148 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1187 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1149 bheads = [h for h in bheads if h in fbheads]
1188 bheads = [h for h in bheads if h in fbheads]
1150 if not closed:
1189 if not closed:
1151 bheads = [h for h in bheads if
1190 bheads = [h for h in bheads if
1152 ('close' not in self.changelog.read(h)[5])]
1191 ('close' not in self.changelog.read(h)[5])]
1153 return bheads
1192 return bheads
1154
1193
1155 def branches(self, nodes):
1194 def branches(self, nodes):
1156 if not nodes:
1195 if not nodes:
1157 nodes = [self.changelog.tip()]
1196 nodes = [self.changelog.tip()]
1158 b = []
1197 b = []
1159 for n in nodes:
1198 for n in nodes:
1160 t = n
1199 t = n
1161 while 1:
1200 while 1:
1162 p = self.changelog.parents(n)
1201 p = self.changelog.parents(n)
1163 if p[1] != nullid or p[0] == nullid:
1202 if p[1] != nullid or p[0] == nullid:
1164 b.append((t, n, p[0], p[1]))
1203 b.append((t, n, p[0], p[1]))
1165 break
1204 break
1166 n = p[0]
1205 n = p[0]
1167 return b
1206 return b
1168
1207
1169 def between(self, pairs):
1208 def between(self, pairs):
1170 r = []
1209 r = []
1171
1210
1172 for top, bottom in pairs:
1211 for top, bottom in pairs:
1173 n, l, i = top, [], 0
1212 n, l, i = top, [], 0
1174 f = 1
1213 f = 1
1175
1214
1176 while n != bottom and n != nullid:
1215 while n != bottom and n != nullid:
1177 p = self.changelog.parents(n)[0]
1216 p = self.changelog.parents(n)[0]
1178 if i == f:
1217 if i == f:
1179 l.append(n)
1218 l.append(n)
1180 f = f * 2
1219 f = f * 2
1181 n = p
1220 n = p
1182 i += 1
1221 i += 1
1183
1222
1184 r.append(l)
1223 r.append(l)
1185
1224
1186 return r
1225 return r
1187
1226
1188 def pull(self, remote, heads=None, force=False):
1227 def pull(self, remote, heads=None, force=False):
1189 lock = self.lock()
1228 lock = self.lock()
1190 try:
1229 try:
1191 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1230 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1192 force=force)
1231 force=force)
1193 common, fetch, rheads = tmp
1232 common, fetch, rheads = tmp
1194 if not fetch:
1233 if not fetch:
1195 self.ui.status(_("no changes found\n"))
1234 self.ui.status(_("no changes found\n"))
1196 return 0
1235 return 0
1197
1236
1198 if fetch == [nullid]:
1237 if fetch == [nullid]:
1199 self.ui.status(_("requesting all changes\n"))
1238 self.ui.status(_("requesting all changes\n"))
1200 elif heads is None and remote.capable('changegroupsubset'):
1239 elif heads is None and remote.capable('changegroupsubset'):
1201 # issue1320, avoid a race if remote changed after discovery
1240 # issue1320, avoid a race if remote changed after discovery
1202 heads = rheads
1241 heads = rheads
1203
1242
1204 if heads is None:
1243 if heads is None:
1205 cg = remote.changegroup(fetch, 'pull')
1244 cg = remote.changegroup(fetch, 'pull')
1206 else:
1245 else:
1207 if not remote.capable('changegroupsubset'):
1246 if not remote.capable('changegroupsubset'):
1208 raise util.Abort(_("partial pull cannot be done because "
1247 raise util.Abort(_("partial pull cannot be done because "
1209 "other repository doesn't support "
1248 "other repository doesn't support "
1210 "changegroupsubset."))
1249 "changegroupsubset."))
1211 cg = remote.changegroupsubset(fetch, heads, 'pull')
1250 cg = remote.changegroupsubset(fetch, heads, 'pull')
1212 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1251 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1213 finally:
1252 finally:
1214 lock.release()
1253 lock.release()
1215
1254
1216 def push(self, remote, force=False, revs=None, newbranch=False):
1255 def push(self, remote, force=False, revs=None, newbranch=False):
1217 '''Push outgoing changesets (limited by revs) from the current
1256 '''Push outgoing changesets (limited by revs) from the current
1218 repository to remote. Return an integer:
1257 repository to remote. Return an integer:
1219 - 0 means HTTP error *or* nothing to push
1258 - 0 means HTTP error *or* nothing to push
1220 - 1 means we pushed and remote head count is unchanged *or*
1259 - 1 means we pushed and remote head count is unchanged *or*
1221 we have outgoing changesets but refused to push
1260 we have outgoing changesets but refused to push
1222 - other values as described by addchangegroup()
1261 - other values as described by addchangegroup()
1223 '''
1262 '''
1224 # there are two ways to push to remote repo:
1263 # there are two ways to push to remote repo:
1225 #
1264 #
1226 # addchangegroup assumes local user can lock remote
1265 # addchangegroup assumes local user can lock remote
1227 # repo (local filesystem, old ssh servers).
1266 # repo (local filesystem, old ssh servers).
1228 #
1267 #
1229 # unbundle assumes local user cannot lock remote repo (new ssh
1268 # unbundle assumes local user cannot lock remote repo (new ssh
1230 # servers, http servers).
1269 # servers, http servers).
1231
1270
1232 lock = None
1271 lock = None
1233 unbundle = remote.capable('unbundle')
1272 unbundle = remote.capable('unbundle')
1234 if not unbundle:
1273 if not unbundle:
1235 lock = remote.lock()
1274 lock = remote.lock()
1236 try:
1275 try:
1237 ret = discovery.prepush(self, remote, force, revs, newbranch)
1276 ret = discovery.prepush(self, remote, force, revs, newbranch)
1238 if ret[0] is None:
1277 if ret[0] is None:
1239 # and here we return 0 for "nothing to push" or 1 for
1278 # and here we return 0 for "nothing to push" or 1 for
1240 # "something to push but I refuse"
1279 # "something to push but I refuse"
1241 return ret[1]
1280 return ret[1]
1242
1281
1243 cg, remote_heads = ret
1282 cg, remote_heads = ret
1244 if unbundle:
1283 if unbundle:
1245 # local repo finds heads on server, finds out what revs it must
1284 # local repo finds heads on server, finds out what revs it must
1246 # push. once revs transferred, if server finds it has
1285 # push. once revs transferred, if server finds it has
1247 # different heads (someone else won commit/push race), server
1286 # different heads (someone else won commit/push race), server
1248 # aborts.
1287 # aborts.
1249 if force:
1288 if force:
1250 remote_heads = ['force']
1289 remote_heads = ['force']
1251 # ssh: return remote's addchangegroup()
1290 # ssh: return remote's addchangegroup()
1252 # http: return remote's addchangegroup() or 0 for error
1291 # http: return remote's addchangegroup() or 0 for error
1253 return remote.unbundle(cg, remote_heads, 'push')
1292 return remote.unbundle(cg, remote_heads, 'push')
1254 else:
1293 else:
1255 # we return an integer indicating remote head count change
1294 # we return an integer indicating remote head count change
1256 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1295 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1257 finally:
1296 finally:
1258 if lock is not None:
1297 if lock is not None:
1259 lock.release()
1298 lock.release()
1260
1299
1261 def changegroupinfo(self, nodes, source):
1300 def changegroupinfo(self, nodes, source):
1262 if self.ui.verbose or source == 'bundle':
1301 if self.ui.verbose or source == 'bundle':
1263 self.ui.status(_("%d changesets found\n") % len(nodes))
1302 self.ui.status(_("%d changesets found\n") % len(nodes))
1264 if self.ui.debugflag:
1303 if self.ui.debugflag:
1265 self.ui.debug("list of changesets:\n")
1304 self.ui.debug("list of changesets:\n")
1266 for node in nodes:
1305 for node in nodes:
1267 self.ui.debug("%s\n" % hex(node))
1306 self.ui.debug("%s\n" % hex(node))
1268
1307
1269 def changegroupsubset(self, bases, heads, source, extranodes=None):
1308 def changegroupsubset(self, bases, heads, source, extranodes=None):
1270 """Compute a changegroup consisting of all the nodes that are
1309 """Compute a changegroup consisting of all the nodes that are
1271 descendents of any of the bases and ancestors of any of the heads.
1310 descendents of any of the bases and ancestors of any of the heads.
1272 Return a chunkbuffer object whose read() method will return
1311 Return a chunkbuffer object whose read() method will return
1273 successive changegroup chunks.
1312 successive changegroup chunks.
1274
1313
1275 It is fairly complex as determining which filenodes and which
1314 It is fairly complex as determining which filenodes and which
1276 manifest nodes need to be included for the changeset to be complete
1315 manifest nodes need to be included for the changeset to be complete
1277 is non-trivial.
1316 is non-trivial.
1278
1317
1279 Another wrinkle is doing the reverse, figuring out which changeset in
1318 Another wrinkle is doing the reverse, figuring out which changeset in
1280 the changegroup a particular filenode or manifestnode belongs to.
1319 the changegroup a particular filenode or manifestnode belongs to.
1281
1320
1282 The caller can specify some nodes that must be included in the
1321 The caller can specify some nodes that must be included in the
1283 changegroup using the extranodes argument. It should be a dict
1322 changegroup using the extranodes argument. It should be a dict
1284 where the keys are the filenames (or 1 for the manifest), and the
1323 where the keys are the filenames (or 1 for the manifest), and the
1285 values are lists of (node, linknode) tuples, where node is a wanted
1324 values are lists of (node, linknode) tuples, where node is a wanted
1286 node and linknode is the changelog node that should be transmitted as
1325 node and linknode is the changelog node that should be transmitted as
1287 the linkrev.
1326 the linkrev.
1288 """
1327 """
1289
1328
1290 # Set up some initial variables
1329 # Set up some initial variables
1291 # Make it easy to refer to self.changelog
1330 # Make it easy to refer to self.changelog
1292 cl = self.changelog
1331 cl = self.changelog
1293 # Compute the list of changesets in this changegroup.
1332 # Compute the list of changesets in this changegroup.
1294 # Some bases may turn out to be superfluous, and some heads may be
1333 # Some bases may turn out to be superfluous, and some heads may be
1295 # too. nodesbetween will return the minimal set of bases and heads
1334 # too. nodesbetween will return the minimal set of bases and heads
1296 # necessary to re-create the changegroup.
1335 # necessary to re-create the changegroup.
1297 if not bases:
1336 if not bases:
1298 bases = [nullid]
1337 bases = [nullid]
1299 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1338 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1300
1339
1301 if extranodes is None:
1340 if extranodes is None:
1302 # can we go through the fast path ?
1341 # can we go through the fast path ?
1303 heads.sort()
1342 heads.sort()
1304 allheads = self.heads()
1343 allheads = self.heads()
1305 allheads.sort()
1344 allheads.sort()
1306 if heads == allheads:
1345 if heads == allheads:
1307 return self._changegroup(msng_cl_lst, source)
1346 return self._changegroup(msng_cl_lst, source)
1308
1347
1309 # slow path
1348 # slow path
1310 self.hook('preoutgoing', throw=True, source=source)
1349 self.hook('preoutgoing', throw=True, source=source)
1311
1350
1312 self.changegroupinfo(msng_cl_lst, source)
1351 self.changegroupinfo(msng_cl_lst, source)
1313
1352
1314 # We assume that all ancestors of bases are known
1353 # We assume that all ancestors of bases are known
1315 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1354 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1316
1355
1317 # Make it easy to refer to self.manifest
1356 # Make it easy to refer to self.manifest
1318 mnfst = self.manifest
1357 mnfst = self.manifest
1319 # We don't know which manifests are missing yet
1358 # We don't know which manifests are missing yet
1320 msng_mnfst_set = {}
1359 msng_mnfst_set = {}
1321 # Nor do we know which filenodes are missing.
1360 # Nor do we know which filenodes are missing.
1322 msng_filenode_set = {}
1361 msng_filenode_set = {}
1323
1362
1324 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1363 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1325 junk = None
1364 junk = None
1326
1365
1327 # A changeset always belongs to itself, so the changenode lookup
1366 # A changeset always belongs to itself, so the changenode lookup
1328 # function for a changenode is identity.
1367 # function for a changenode is identity.
1329 def identity(x):
1368 def identity(x):
1330 return x
1369 return x
1331
1370
1332 # A function generating function that sets up the initial environment
1371 # A function generating function that sets up the initial environment
1333 # the inner function.
1372 # the inner function.
1334 def filenode_collector(changedfiles):
1373 def filenode_collector(changedfiles):
1335 # This gathers information from each manifestnode included in the
1374 # This gathers information from each manifestnode included in the
1336 # changegroup about which filenodes the manifest node references
1375 # changegroup about which filenodes the manifest node references
1337 # so we can include those in the changegroup too.
1376 # so we can include those in the changegroup too.
1338 #
1377 #
1339 # It also remembers which changenode each filenode belongs to. It
1378 # It also remembers which changenode each filenode belongs to. It
1340 # does this by assuming the a filenode belongs to the changenode
1379 # does this by assuming the a filenode belongs to the changenode
1341 # the first manifest that references it belongs to.
1380 # the first manifest that references it belongs to.
1342 def collect_msng_filenodes(mnfstnode):
1381 def collect_msng_filenodes(mnfstnode):
1343 r = mnfst.rev(mnfstnode)
1382 r = mnfst.rev(mnfstnode)
1344 if r - 1 in mnfst.parentrevs(r):
1383 if r - 1 in mnfst.parentrevs(r):
1345 # If the previous rev is one of the parents,
1384 # If the previous rev is one of the parents,
1346 # we only need to see a diff.
1385 # we only need to see a diff.
1347 deltamf = mnfst.readdelta(mnfstnode)
1386 deltamf = mnfst.readdelta(mnfstnode)
1348 # For each line in the delta
1387 # For each line in the delta
1349 for f, fnode in deltamf.iteritems():
1388 for f, fnode in deltamf.iteritems():
1350 # And if the file is in the list of files we care
1389 # And if the file is in the list of files we care
1351 # about.
1390 # about.
1352 if f in changedfiles:
1391 if f in changedfiles:
1353 # Get the changenode this manifest belongs to
1392 # Get the changenode this manifest belongs to
1354 clnode = msng_mnfst_set[mnfstnode]
1393 clnode = msng_mnfst_set[mnfstnode]
1355 # Create the set of filenodes for the file if
1394 # Create the set of filenodes for the file if
1356 # there isn't one already.
1395 # there isn't one already.
1357 ndset = msng_filenode_set.setdefault(f, {})
1396 ndset = msng_filenode_set.setdefault(f, {})
1358 # And set the filenode's changelog node to the
1397 # And set the filenode's changelog node to the
1359 # manifest's if it hasn't been set already.
1398 # manifest's if it hasn't been set already.
1360 ndset.setdefault(fnode, clnode)
1399 ndset.setdefault(fnode, clnode)
1361 else:
1400 else:
1362 # Otherwise we need a full manifest.
1401 # Otherwise we need a full manifest.
1363 m = mnfst.read(mnfstnode)
1402 m = mnfst.read(mnfstnode)
1364 # For every file in we care about.
1403 # For every file in we care about.
1365 for f in changedfiles:
1404 for f in changedfiles:
1366 fnode = m.get(f, None)
1405 fnode = m.get(f, None)
1367 # If it's in the manifest
1406 # If it's in the manifest
1368 if fnode is not None:
1407 if fnode is not None:
1369 # See comments above.
1408 # See comments above.
1370 clnode = msng_mnfst_set[mnfstnode]
1409 clnode = msng_mnfst_set[mnfstnode]
1371 ndset = msng_filenode_set.setdefault(f, {})
1410 ndset = msng_filenode_set.setdefault(f, {})
1372 ndset.setdefault(fnode, clnode)
1411 ndset.setdefault(fnode, clnode)
1373 return collect_msng_filenodes
1412 return collect_msng_filenodes
1374
1413
1375 # If we determine that a particular file or manifest node must be a
1414 # If we determine that a particular file or manifest node must be a
1376 # node that the recipient of the changegroup will already have, we can
1415 # node that the recipient of the changegroup will already have, we can
1377 # also assume the recipient will have all the parents. This function
1416 # also assume the recipient will have all the parents. This function
1378 # prunes them from the set of missing nodes.
1417 # prunes them from the set of missing nodes.
1379 def prune(revlog, missingnodes):
1418 def prune(revlog, missingnodes):
1380 hasset = set()
1419 hasset = set()
1381 # If a 'missing' filenode thinks it belongs to a changenode we
1420 # If a 'missing' filenode thinks it belongs to a changenode we
1382 # assume the recipient must have, then the recipient must have
1421 # assume the recipient must have, then the recipient must have
1383 # that filenode.
1422 # that filenode.
1384 for n in missingnodes:
1423 for n in missingnodes:
1385 clrev = revlog.linkrev(revlog.rev(n))
1424 clrev = revlog.linkrev(revlog.rev(n))
1386 if clrev in commonrevs:
1425 if clrev in commonrevs:
1387 hasset.add(n)
1426 hasset.add(n)
1388 for n in hasset:
1427 for n in hasset:
1389 missingnodes.pop(n, None)
1428 missingnodes.pop(n, None)
1390 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1429 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1391 missingnodes.pop(revlog.node(r), None)
1430 missingnodes.pop(revlog.node(r), None)
1392
1431
1393 # Add the nodes that were explicitly requested.
1432 # Add the nodes that were explicitly requested.
1394 def add_extra_nodes(name, nodes):
1433 def add_extra_nodes(name, nodes):
1395 if not extranodes or name not in extranodes:
1434 if not extranodes or name not in extranodes:
1396 return
1435 return
1397
1436
1398 for node, linknode in extranodes[name]:
1437 for node, linknode in extranodes[name]:
1399 if node not in nodes:
1438 if node not in nodes:
1400 nodes[node] = linknode
1439 nodes[node] = linknode
1401
1440
1402 # Now that we have all theses utility functions to help out and
1441 # Now that we have all theses utility functions to help out and
1403 # logically divide up the task, generate the group.
1442 # logically divide up the task, generate the group.
1404 def gengroup():
1443 def gengroup():
1405 # The set of changed files starts empty.
1444 # The set of changed files starts empty.
1406 changedfiles = set()
1445 changedfiles = set()
1407 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1446 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1408
1447
1409 # Create a changenode group generator that will call our functions
1448 # Create a changenode group generator that will call our functions
1410 # back to lookup the owning changenode and collect information.
1449 # back to lookup the owning changenode and collect information.
1411 group = cl.group(msng_cl_lst, identity, collect)
1450 group = cl.group(msng_cl_lst, identity, collect)
1412 for cnt, chnk in enumerate(group):
1451 for cnt, chnk in enumerate(group):
1413 yield chnk
1452 yield chnk
1414 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1453 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1415 self.ui.progress(_('bundling changes'), None)
1454 self.ui.progress(_('bundling changes'), None)
1416
1455
1417 prune(mnfst, msng_mnfst_set)
1456 prune(mnfst, msng_mnfst_set)
1418 add_extra_nodes(1, msng_mnfst_set)
1457 add_extra_nodes(1, msng_mnfst_set)
1419 msng_mnfst_lst = msng_mnfst_set.keys()
1458 msng_mnfst_lst = msng_mnfst_set.keys()
1420 # Sort the manifestnodes by revision number.
1459 # Sort the manifestnodes by revision number.
1421 msng_mnfst_lst.sort(key=mnfst.rev)
1460 msng_mnfst_lst.sort(key=mnfst.rev)
1422 # Create a generator for the manifestnodes that calls our lookup
1461 # Create a generator for the manifestnodes that calls our lookup
1423 # and data collection functions back.
1462 # and data collection functions back.
1424 group = mnfst.group(msng_mnfst_lst,
1463 group = mnfst.group(msng_mnfst_lst,
1425 lambda mnode: msng_mnfst_set[mnode],
1464 lambda mnode: msng_mnfst_set[mnode],
1426 filenode_collector(changedfiles))
1465 filenode_collector(changedfiles))
1427 for cnt, chnk in enumerate(group):
1466 for cnt, chnk in enumerate(group):
1428 yield chnk
1467 yield chnk
1429 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1468 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1430 self.ui.progress(_('bundling manifests'), None)
1469 self.ui.progress(_('bundling manifests'), None)
1431
1470
1432 # These are no longer needed, dereference and toss the memory for
1471 # These are no longer needed, dereference and toss the memory for
1433 # them.
1472 # them.
1434 msng_mnfst_lst = None
1473 msng_mnfst_lst = None
1435 msng_mnfst_set.clear()
1474 msng_mnfst_set.clear()
1436
1475
1437 if extranodes:
1476 if extranodes:
1438 for fname in extranodes:
1477 for fname in extranodes:
1439 if isinstance(fname, int):
1478 if isinstance(fname, int):
1440 continue
1479 continue
1441 msng_filenode_set.setdefault(fname, {})
1480 msng_filenode_set.setdefault(fname, {})
1442 changedfiles.add(fname)
1481 changedfiles.add(fname)
1443 # Go through all our files in order sorted by name.
1482 # Go through all our files in order sorted by name.
1444 cnt = 0
1483 cnt = 0
1445 for fname in sorted(changedfiles):
1484 for fname in sorted(changedfiles):
1446 filerevlog = self.file(fname)
1485 filerevlog = self.file(fname)
1447 if not len(filerevlog):
1486 if not len(filerevlog):
1448 raise util.Abort(_("empty or missing revlog for %s") % fname)
1487 raise util.Abort(_("empty or missing revlog for %s") % fname)
1449 # Toss out the filenodes that the recipient isn't really
1488 # Toss out the filenodes that the recipient isn't really
1450 # missing.
1489 # missing.
1451 missingfnodes = msng_filenode_set.pop(fname, {})
1490 missingfnodes = msng_filenode_set.pop(fname, {})
1452 prune(filerevlog, missingfnodes)
1491 prune(filerevlog, missingfnodes)
1453 add_extra_nodes(fname, missingfnodes)
1492 add_extra_nodes(fname, missingfnodes)
1454 # If any filenodes are left, generate the group for them,
1493 # If any filenodes are left, generate the group for them,
1455 # otherwise don't bother.
1494 # otherwise don't bother.
1456 if missingfnodes:
1495 if missingfnodes:
1457 yield changegroup.chunkheader(len(fname))
1496 yield changegroup.chunkheader(len(fname))
1458 yield fname
1497 yield fname
1459 # Sort the filenodes by their revision # (topological order)
1498 # Sort the filenodes by their revision # (topological order)
1460 nodeiter = list(missingfnodes)
1499 nodeiter = list(missingfnodes)
1461 nodeiter.sort(key=filerevlog.rev)
1500 nodeiter.sort(key=filerevlog.rev)
1462 # Create a group generator and only pass in a changenode
1501 # Create a group generator and only pass in a changenode
1463 # lookup function as we need to collect no information
1502 # lookup function as we need to collect no information
1464 # from filenodes.
1503 # from filenodes.
1465 group = filerevlog.group(nodeiter,
1504 group = filerevlog.group(nodeiter,
1466 lambda fnode: missingfnodes[fnode])
1505 lambda fnode: missingfnodes[fnode])
1467 for chnk in group:
1506 for chnk in group:
1468 self.ui.progress(
1507 self.ui.progress(
1469 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1508 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1470 cnt += 1
1509 cnt += 1
1471 yield chnk
1510 yield chnk
1472 # Signal that no more groups are left.
1511 # Signal that no more groups are left.
1473 yield changegroup.closechunk()
1512 yield changegroup.closechunk()
1474 self.ui.progress(_('bundling files'), None)
1513 self.ui.progress(_('bundling files'), None)
1475
1514
1476 if msng_cl_lst:
1515 if msng_cl_lst:
1477 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1516 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1478
1517
1479 return util.chunkbuffer(gengroup())
1518 return util.chunkbuffer(gengroup())
1480
1519
1481 def changegroup(self, basenodes, source):
1520 def changegroup(self, basenodes, source):
1482 # to avoid a race we use changegroupsubset() (issue1320)
1521 # to avoid a race we use changegroupsubset() (issue1320)
1483 return self.changegroupsubset(basenodes, self.heads(), source)
1522 return self.changegroupsubset(basenodes, self.heads(), source)
1484
1523
1485 def _changegroup(self, nodes, source):
1524 def _changegroup(self, nodes, source):
1486 """Compute the changegroup of all nodes that we have that a recipient
1525 """Compute the changegroup of all nodes that we have that a recipient
1487 doesn't. Return a chunkbuffer object whose read() method will return
1526 doesn't. Return a chunkbuffer object whose read() method will return
1488 successive changegroup chunks.
1527 successive changegroup chunks.
1489
1528
1490 This is much easier than the previous function as we can assume that
1529 This is much easier than the previous function as we can assume that
1491 the recipient has any changenode we aren't sending them.
1530 the recipient has any changenode we aren't sending them.
1492
1531
1493 nodes is the set of nodes to send"""
1532 nodes is the set of nodes to send"""
1494
1533
1495 self.hook('preoutgoing', throw=True, source=source)
1534 self.hook('preoutgoing', throw=True, source=source)
1496
1535
1497 cl = self.changelog
1536 cl = self.changelog
1498 revset = set([cl.rev(n) for n in nodes])
1537 revset = set([cl.rev(n) for n in nodes])
1499 self.changegroupinfo(nodes, source)
1538 self.changegroupinfo(nodes, source)
1500
1539
1501 def identity(x):
1540 def identity(x):
1502 return x
1541 return x
1503
1542
1504 def gennodelst(log):
1543 def gennodelst(log):
1505 for r in log:
1544 for r in log:
1506 if log.linkrev(r) in revset:
1545 if log.linkrev(r) in revset:
1507 yield log.node(r)
1546 yield log.node(r)
1508
1547
1509 def lookuplinkrev_func(revlog):
1548 def lookuplinkrev_func(revlog):
1510 def lookuplinkrev(n):
1549 def lookuplinkrev(n):
1511 return cl.node(revlog.linkrev(revlog.rev(n)))
1550 return cl.node(revlog.linkrev(revlog.rev(n)))
1512 return lookuplinkrev
1551 return lookuplinkrev
1513
1552
1514 def gengroup():
1553 def gengroup():
1515 '''yield a sequence of changegroup chunks (strings)'''
1554 '''yield a sequence of changegroup chunks (strings)'''
1516 # construct a list of all changed files
1555 # construct a list of all changed files
1517 changedfiles = set()
1556 changedfiles = set()
1518 mmfs = {}
1557 mmfs = {}
1519 collect = changegroup.collector(cl, mmfs, changedfiles)
1558 collect = changegroup.collector(cl, mmfs, changedfiles)
1520
1559
1521 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1560 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1522 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1561 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1523 yield chnk
1562 yield chnk
1524 self.ui.progress(_('bundling changes'), None)
1563 self.ui.progress(_('bundling changes'), None)
1525
1564
1526 mnfst = self.manifest
1565 mnfst = self.manifest
1527 nodeiter = gennodelst(mnfst)
1566 nodeiter = gennodelst(mnfst)
1528 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1567 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1529 lookuplinkrev_func(mnfst))):
1568 lookuplinkrev_func(mnfst))):
1530 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1569 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1531 yield chnk
1570 yield chnk
1532 self.ui.progress(_('bundling manifests'), None)
1571 self.ui.progress(_('bundling manifests'), None)
1533
1572
1534 cnt = 0
1573 cnt = 0
1535 for fname in sorted(changedfiles):
1574 for fname in sorted(changedfiles):
1536 filerevlog = self.file(fname)
1575 filerevlog = self.file(fname)
1537 if not len(filerevlog):
1576 if not len(filerevlog):
1538 raise util.Abort(_("empty or missing revlog for %s") % fname)
1577 raise util.Abort(_("empty or missing revlog for %s") % fname)
1539 nodeiter = gennodelst(filerevlog)
1578 nodeiter = gennodelst(filerevlog)
1540 nodeiter = list(nodeiter)
1579 nodeiter = list(nodeiter)
1541 if nodeiter:
1580 if nodeiter:
1542 yield changegroup.chunkheader(len(fname))
1581 yield changegroup.chunkheader(len(fname))
1543 yield fname
1582 yield fname
1544 lookup = lookuplinkrev_func(filerevlog)
1583 lookup = lookuplinkrev_func(filerevlog)
1545 for chnk in filerevlog.group(nodeiter, lookup):
1584 for chnk in filerevlog.group(nodeiter, lookup):
1546 self.ui.progress(
1585 self.ui.progress(
1547 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1586 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1548 cnt += 1
1587 cnt += 1
1549 yield chnk
1588 yield chnk
1550 self.ui.progress(_('bundling files'), None)
1589 self.ui.progress(_('bundling files'), None)
1551
1590
1552 yield changegroup.closechunk()
1591 yield changegroup.closechunk()
1553
1592
1554 if nodes:
1593 if nodes:
1555 self.hook('outgoing', node=hex(nodes[0]), source=source)
1594 self.hook('outgoing', node=hex(nodes[0]), source=source)
1556
1595
1557 return util.chunkbuffer(gengroup())
1596 return util.chunkbuffer(gengroup())
1558
1597
1559 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1598 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1560 """Add the changegroup returned by source.read() to this repo.
1599 """Add the changegroup returned by source.read() to this repo.
1561 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1600 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1562 the URL of the repo where this changegroup is coming from.
1601 the URL of the repo where this changegroup is coming from.
1563
1602
1564 Return an integer summarizing the change to this repo:
1603 Return an integer summarizing the change to this repo:
1565 - nothing changed or no source: 0
1604 - nothing changed or no source: 0
1566 - more heads than before: 1+added heads (2..n)
1605 - more heads than before: 1+added heads (2..n)
1567 - fewer heads than before: -1-removed heads (-2..-n)
1606 - fewer heads than before: -1-removed heads (-2..-n)
1568 - number of heads stays the same: 1
1607 - number of heads stays the same: 1
1569 """
1608 """
1570 def csmap(x):
1609 def csmap(x):
1571 self.ui.debug("add changeset %s\n" % short(x))
1610 self.ui.debug("add changeset %s\n" % short(x))
1572 return len(cl)
1611 return len(cl)
1573
1612
1574 def revmap(x):
1613 def revmap(x):
1575 return cl.rev(x)
1614 return cl.rev(x)
1576
1615
1577 if not source:
1616 if not source:
1578 return 0
1617 return 0
1579
1618
1580 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1619 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1581
1620
1582 changesets = files = revisions = 0
1621 changesets = files = revisions = 0
1583 efiles = set()
1622 efiles = set()
1584
1623
1585 # write changelog data to temp files so concurrent readers will not see
1624 # write changelog data to temp files so concurrent readers will not see
1586 # inconsistent view
1625 # inconsistent view
1587 cl = self.changelog
1626 cl = self.changelog
1588 cl.delayupdate()
1627 cl.delayupdate()
1589 oldheads = len(cl.heads())
1628 oldheads = len(cl.heads())
1590
1629
1591 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1630 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1592 try:
1631 try:
1593 trp = weakref.proxy(tr)
1632 trp = weakref.proxy(tr)
1594 # pull off the changeset group
1633 # pull off the changeset group
1595 self.ui.status(_("adding changesets\n"))
1634 self.ui.status(_("adding changesets\n"))
1596 clstart = len(cl)
1635 clstart = len(cl)
1597 class prog(object):
1636 class prog(object):
1598 step = _('changesets')
1637 step = _('changesets')
1599 count = 1
1638 count = 1
1600 ui = self.ui
1639 ui = self.ui
1601 total = None
1640 total = None
1602 def __call__(self):
1641 def __call__(self):
1603 self.ui.progress(self.step, self.count, unit=_('chunks'),
1642 self.ui.progress(self.step, self.count, unit=_('chunks'),
1604 total=self.total)
1643 total=self.total)
1605 self.count += 1
1644 self.count += 1
1606 pr = prog()
1645 pr = prog()
1607 chunkiter = changegroup.chunkiter(source, progress=pr)
1646 chunkiter = changegroup.chunkiter(source, progress=pr)
1608 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1647 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1609 raise util.Abort(_("received changelog group is empty"))
1648 raise util.Abort(_("received changelog group is empty"))
1610 clend = len(cl)
1649 clend = len(cl)
1611 changesets = clend - clstart
1650 changesets = clend - clstart
1612 for c in xrange(clstart, clend):
1651 for c in xrange(clstart, clend):
1613 efiles.update(self[c].files())
1652 efiles.update(self[c].files())
1614 efiles = len(efiles)
1653 efiles = len(efiles)
1615 self.ui.progress(_('changesets'), None)
1654 self.ui.progress(_('changesets'), None)
1616
1655
1617 # pull off the manifest group
1656 # pull off the manifest group
1618 self.ui.status(_("adding manifests\n"))
1657 self.ui.status(_("adding manifests\n"))
1619 pr.step = _('manifests')
1658 pr.step = _('manifests')
1620 pr.count = 1
1659 pr.count = 1
1621 pr.total = changesets # manifests <= changesets
1660 pr.total = changesets # manifests <= changesets
1622 chunkiter = changegroup.chunkiter(source, progress=pr)
1661 chunkiter = changegroup.chunkiter(source, progress=pr)
1623 # no need to check for empty manifest group here:
1662 # no need to check for empty manifest group here:
1624 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1663 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1625 # no new manifest will be created and the manifest group will
1664 # no new manifest will be created and the manifest group will
1626 # be empty during the pull
1665 # be empty during the pull
1627 self.manifest.addgroup(chunkiter, revmap, trp)
1666 self.manifest.addgroup(chunkiter, revmap, trp)
1628 self.ui.progress(_('manifests'), None)
1667 self.ui.progress(_('manifests'), None)
1629
1668
1630 needfiles = {}
1669 needfiles = {}
1631 if self.ui.configbool('server', 'validate', default=False):
1670 if self.ui.configbool('server', 'validate', default=False):
1632 # validate incoming csets have their manifests
1671 # validate incoming csets have their manifests
1633 for cset in xrange(clstart, clend):
1672 for cset in xrange(clstart, clend):
1634 mfest = self.changelog.read(self.changelog.node(cset))[0]
1673 mfest = self.changelog.read(self.changelog.node(cset))[0]
1635 mfest = self.manifest.readdelta(mfest)
1674 mfest = self.manifest.readdelta(mfest)
1636 # store file nodes we must see
1675 # store file nodes we must see
1637 for f, n in mfest.iteritems():
1676 for f, n in mfest.iteritems():
1638 needfiles.setdefault(f, set()).add(n)
1677 needfiles.setdefault(f, set()).add(n)
1639
1678
1640 # process the files
1679 # process the files
1641 self.ui.status(_("adding file changes\n"))
1680 self.ui.status(_("adding file changes\n"))
1642 pr.step = 'files'
1681 pr.step = 'files'
1643 pr.count = 1
1682 pr.count = 1
1644 pr.total = efiles
1683 pr.total = efiles
1645 while 1:
1684 while 1:
1646 f = changegroup.getchunk(source)
1685 f = changegroup.getchunk(source)
1647 if not f:
1686 if not f:
1648 break
1687 break
1649 self.ui.debug("adding %s revisions\n" % f)
1688 self.ui.debug("adding %s revisions\n" % f)
1650 pr()
1689 pr()
1651 fl = self.file(f)
1690 fl = self.file(f)
1652 o = len(fl)
1691 o = len(fl)
1653 chunkiter = changegroup.chunkiter(source)
1692 chunkiter = changegroup.chunkiter(source)
1654 if fl.addgroup(chunkiter, revmap, trp) is None:
1693 if fl.addgroup(chunkiter, revmap, trp) is None:
1655 raise util.Abort(_("received file revlog group is empty"))
1694 raise util.Abort(_("received file revlog group is empty"))
1656 revisions += len(fl) - o
1695 revisions += len(fl) - o
1657 files += 1
1696 files += 1
1658 if f in needfiles:
1697 if f in needfiles:
1659 needs = needfiles[f]
1698 needs = needfiles[f]
1660 for new in xrange(o, len(fl)):
1699 for new in xrange(o, len(fl)):
1661 n = fl.node(new)
1700 n = fl.node(new)
1662 if n in needs:
1701 if n in needs:
1663 needs.remove(n)
1702 needs.remove(n)
1664 if not needs:
1703 if not needs:
1665 del needfiles[f]
1704 del needfiles[f]
1666 self.ui.progress(_('files'), None)
1705 self.ui.progress(_('files'), None)
1667
1706
1668 for f, needs in needfiles.iteritems():
1707 for f, needs in needfiles.iteritems():
1669 fl = self.file(f)
1708 fl = self.file(f)
1670 for n in needs:
1709 for n in needs:
1671 try:
1710 try:
1672 fl.rev(n)
1711 fl.rev(n)
1673 except error.LookupError:
1712 except error.LookupError:
1674 raise util.Abort(
1713 raise util.Abort(
1675 _('missing file data for %s:%s - run hg verify') %
1714 _('missing file data for %s:%s - run hg verify') %
1676 (f, hex(n)))
1715 (f, hex(n)))
1677
1716
1678 newheads = len(cl.heads())
1717 newheads = len(cl.heads())
1679 heads = ""
1718 heads = ""
1680 if oldheads and newheads != oldheads:
1719 if oldheads and newheads != oldheads:
1681 heads = _(" (%+d heads)") % (newheads - oldheads)
1720 heads = _(" (%+d heads)") % (newheads - oldheads)
1682
1721
1683 self.ui.status(_("added %d changesets"
1722 self.ui.status(_("added %d changesets"
1684 " with %d changes to %d files%s\n")
1723 " with %d changes to %d files%s\n")
1685 % (changesets, revisions, files, heads))
1724 % (changesets, revisions, files, heads))
1686
1725
1687 if changesets > 0:
1726 if changesets > 0:
1688 p = lambda: cl.writepending() and self.root or ""
1727 p = lambda: cl.writepending() and self.root or ""
1689 self.hook('pretxnchangegroup', throw=True,
1728 self.hook('pretxnchangegroup', throw=True,
1690 node=hex(cl.node(clstart)), source=srctype,
1729 node=hex(cl.node(clstart)), source=srctype,
1691 url=url, pending=p)
1730 url=url, pending=p)
1692
1731
1693 # make changelog see real files again
1732 # make changelog see real files again
1694 cl.finalize(trp)
1733 cl.finalize(trp)
1695
1734
1696 tr.close()
1735 tr.close()
1697 finally:
1736 finally:
1698 tr.release()
1737 tr.release()
1699 if lock:
1738 if lock:
1700 lock.release()
1739 lock.release()
1701
1740
1702 if changesets > 0:
1741 if changesets > 0:
1703 # forcefully update the on-disk branch cache
1742 # forcefully update the on-disk branch cache
1704 self.ui.debug("updating the branch cache\n")
1743 self.ui.debug("updating the branch cache\n")
1705 self.updatebranchcache()
1744 self.updatebranchcache()
1706 self.hook("changegroup", node=hex(cl.node(clstart)),
1745 self.hook("changegroup", node=hex(cl.node(clstart)),
1707 source=srctype, url=url)
1746 source=srctype, url=url)
1708
1747
1709 for i in xrange(clstart, clend):
1748 for i in xrange(clstart, clend):
1710 self.hook("incoming", node=hex(cl.node(i)),
1749 self.hook("incoming", node=hex(cl.node(i)),
1711 source=srctype, url=url)
1750 source=srctype, url=url)
1712
1751
1713 # never return 0 here:
1752 # never return 0 here:
1714 if newheads < oldheads:
1753 if newheads < oldheads:
1715 return newheads - oldheads - 1
1754 return newheads - oldheads - 1
1716 else:
1755 else:
1717 return newheads - oldheads + 1
1756 return newheads - oldheads + 1
1718
1757
1719
1758
1720 def stream_in(self, remote):
1759 def stream_in(self, remote):
1721 fp = remote.stream_out()
1760 fp = remote.stream_out()
1722 l = fp.readline()
1761 l = fp.readline()
1723 try:
1762 try:
1724 resp = int(l)
1763 resp = int(l)
1725 except ValueError:
1764 except ValueError:
1726 raise error.ResponseError(
1765 raise error.ResponseError(
1727 _('Unexpected response from remote server:'), l)
1766 _('Unexpected response from remote server:'), l)
1728 if resp == 1:
1767 if resp == 1:
1729 raise util.Abort(_('operation forbidden by server'))
1768 raise util.Abort(_('operation forbidden by server'))
1730 elif resp == 2:
1769 elif resp == 2:
1731 raise util.Abort(_('locking the remote repository failed'))
1770 raise util.Abort(_('locking the remote repository failed'))
1732 elif resp != 0:
1771 elif resp != 0:
1733 raise util.Abort(_('the server sent an unknown error code'))
1772 raise util.Abort(_('the server sent an unknown error code'))
1734 self.ui.status(_('streaming all changes\n'))
1773 self.ui.status(_('streaming all changes\n'))
1735 l = fp.readline()
1774 l = fp.readline()
1736 try:
1775 try:
1737 total_files, total_bytes = map(int, l.split(' ', 1))
1776 total_files, total_bytes = map(int, l.split(' ', 1))
1738 except (ValueError, TypeError):
1777 except (ValueError, TypeError):
1739 raise error.ResponseError(
1778 raise error.ResponseError(
1740 _('Unexpected response from remote server:'), l)
1779 _('Unexpected response from remote server:'), l)
1741 self.ui.status(_('%d files to transfer, %s of data\n') %
1780 self.ui.status(_('%d files to transfer, %s of data\n') %
1742 (total_files, util.bytecount(total_bytes)))
1781 (total_files, util.bytecount(total_bytes)))
1743 start = time.time()
1782 start = time.time()
1744 for i in xrange(total_files):
1783 for i in xrange(total_files):
1745 # XXX doesn't support '\n' or '\r' in filenames
1784 # XXX doesn't support '\n' or '\r' in filenames
1746 l = fp.readline()
1785 l = fp.readline()
1747 try:
1786 try:
1748 name, size = l.split('\0', 1)
1787 name, size = l.split('\0', 1)
1749 size = int(size)
1788 size = int(size)
1750 except (ValueError, TypeError):
1789 except (ValueError, TypeError):
1751 raise error.ResponseError(
1790 raise error.ResponseError(
1752 _('Unexpected response from remote server:'), l)
1791 _('Unexpected response from remote server:'), l)
1753 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1792 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1754 # for backwards compat, name was partially encoded
1793 # for backwards compat, name was partially encoded
1755 ofp = self.sopener(store.decodedir(name), 'w')
1794 ofp = self.sopener(store.decodedir(name), 'w')
1756 for chunk in util.filechunkiter(fp, limit=size):
1795 for chunk in util.filechunkiter(fp, limit=size):
1757 ofp.write(chunk)
1796 ofp.write(chunk)
1758 ofp.close()
1797 ofp.close()
1759 elapsed = time.time() - start
1798 elapsed = time.time() - start
1760 if elapsed <= 0:
1799 if elapsed <= 0:
1761 elapsed = 0.001
1800 elapsed = 0.001
1762 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1801 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1763 (util.bytecount(total_bytes), elapsed,
1802 (util.bytecount(total_bytes), elapsed,
1764 util.bytecount(total_bytes / elapsed)))
1803 util.bytecount(total_bytes / elapsed)))
1765 self.invalidate()
1804 self.invalidate()
1766 return len(self.heads()) + 1
1805 return len(self.heads()) + 1
1767
1806
1768 def clone(self, remote, heads=[], stream=False):
1807 def clone(self, remote, heads=[], stream=False):
1769 '''clone remote repository.
1808 '''clone remote repository.
1770
1809
1771 keyword arguments:
1810 keyword arguments:
1772 heads: list of revs to clone (forces use of pull)
1811 heads: list of revs to clone (forces use of pull)
1773 stream: use streaming clone if possible'''
1812 stream: use streaming clone if possible'''
1774
1813
1775 # now, all clients that can request uncompressed clones can
1814 # now, all clients that can request uncompressed clones can
1776 # read repo formats supported by all servers that can serve
1815 # read repo formats supported by all servers that can serve
1777 # them.
1816 # them.
1778
1817
1779 # if revlog format changes, client will have to check version
1818 # if revlog format changes, client will have to check version
1780 # and format flags on "stream" capability, and use
1819 # and format flags on "stream" capability, and use
1781 # uncompressed only if compatible.
1820 # uncompressed only if compatible.
1782
1821
1783 if stream and not heads and remote.capable('stream'):
1822 if stream and not heads and remote.capable('stream'):
1784 return self.stream_in(remote)
1823 return self.stream_in(remote)
1785 return self.pull(remote, heads)
1824 return self.pull(remote, heads)
1786
1825
1787 def pushkey(self, namespace, key, old, new):
1826 def pushkey(self, namespace, key, old, new):
1788 return pushkey.push(self, namespace, key, old, new)
1827 return pushkey.push(self, namespace, key, old, new)
1789
1828
1790 def listkeys(self, namespace):
1829 def listkeys(self, namespace):
1791 return pushkey.list(self, namespace)
1830 return pushkey.list(self, namespace)
1792
1831
1793 # used to avoid circular references so destructors work
1832 # used to avoid circular references so destructors work
1794 def aftertrans(files):
1833 def aftertrans(files):
1795 renamefiles = [tuple(t) for t in files]
1834 renamefiles = [tuple(t) for t in files]
1796 def a():
1835 def a():
1797 for src, dest in renamefiles:
1836 for src, dest in renamefiles:
1798 util.rename(src, dest)
1837 util.rename(src, dest)
1799 return a
1838 return a
1800
1839
1801 def instance(ui, path, create):
1840 def instance(ui, path, create):
1802 return localrepository(ui, util.drop_scheme('file', path), create)
1841 return localrepository(ui, util.drop_scheme('file', path), create)
1803
1842
1804 def islocal(path):
1843 def islocal(path):
1805 return True
1844 return True
@@ -1,447 +1,454
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
9 from i18n import _
9 from i18n import _
10 import config, util, node, error
10 import config, util, node, error
11 hg = None
11 hg = None
12
12
13 nullstate = ('', '', 'empty')
13 nullstate = ('', '', 'empty')
14
14
15 def state(ctx, ui):
15 def state(ctx, ui):
16 """return a state dict, mapping subrepo paths configured in .hgsub
16 """return a state dict, mapping subrepo paths configured in .hgsub
17 to tuple: (source from .hgsub, revision from .hgsubstate, kind
17 to tuple: (source from .hgsub, revision from .hgsubstate, kind
18 (key in types dict))
18 (key in types dict))
19 """
19 """
20 p = config.config()
20 p = config.config()
21 def read(f, sections=None, remap=None):
21 def read(f, sections=None, remap=None):
22 if f in ctx:
22 if f in ctx:
23 p.parse(f, ctx[f].data(), sections, remap, read)
23 p.parse(f, ctx[f].data(), sections, remap, read)
24 else:
24 else:
25 raise util.Abort(_("subrepo spec file %s not found") % f)
25 raise util.Abort(_("subrepo spec file %s not found") % f)
26
26
27 if '.hgsub' in ctx:
27 if '.hgsub' in ctx:
28 read('.hgsub')
28 read('.hgsub')
29
29
30 for path, src in ui.configitems('subpaths'):
30 for path, src in ui.configitems('subpaths'):
31 p.set('subpaths', path, src, ui.configsource('subpaths', path))
31 p.set('subpaths', path, src, ui.configsource('subpaths', path))
32
32
33 rev = {}
33 rev = {}
34 if '.hgsubstate' in ctx:
34 if '.hgsubstate' in ctx:
35 try:
35 try:
36 for l in ctx['.hgsubstate'].data().splitlines():
36 for l in ctx['.hgsubstate'].data().splitlines():
37 revision, path = l.split(" ", 1)
37 revision, path = l.split(" ", 1)
38 rev[path] = revision
38 rev[path] = revision
39 except IOError, err:
39 except IOError, err:
40 if err.errno != errno.ENOENT:
40 if err.errno != errno.ENOENT:
41 raise
41 raise
42
42
43 state = {}
43 state = {}
44 for path, src in p[''].items():
44 for path, src in p[''].items():
45 kind = 'hg'
45 kind = 'hg'
46 if src.startswith('['):
46 if src.startswith('['):
47 if ']' not in src:
47 if ']' not in src:
48 raise util.Abort(_('missing ] in subrepo source'))
48 raise util.Abort(_('missing ] in subrepo source'))
49 kind, src = src.split(']', 1)
49 kind, src = src.split(']', 1)
50 kind = kind[1:]
50 kind = kind[1:]
51
51
52 for pattern, repl in p.items('subpaths'):
52 for pattern, repl in p.items('subpaths'):
53 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
53 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
54 # does a string decode.
54 # does a string decode.
55 repl = repl.encode('string-escape')
55 repl = repl.encode('string-escape')
56 # However, we still want to allow back references to go
56 # However, we still want to allow back references to go
57 # through unharmed, so we turn r'\\1' into r'\1'. Again,
57 # through unharmed, so we turn r'\\1' into r'\1'. Again,
58 # extra escapes are needed because re.sub string decodes.
58 # extra escapes are needed because re.sub string decodes.
59 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
59 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
60 try:
60 try:
61 src = re.sub(pattern, repl, src, 1)
61 src = re.sub(pattern, repl, src, 1)
62 except re.error, e:
62 except re.error, e:
63 raise util.Abort(_("bad subrepository pattern in %s: %s")
63 raise util.Abort(_("bad subrepository pattern in %s: %s")
64 % (p.source('subpaths', pattern), e))
64 % (p.source('subpaths', pattern), e))
65
65
66 state[path] = (src.strip(), rev.get(path, ''), kind)
66 state[path] = (src.strip(), rev.get(path, ''), kind)
67
67
68 return state
68 return state
69
69
70 def writestate(repo, state):
70 def writestate(repo, state):
71 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
71 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
72 repo.wwrite('.hgsubstate',
72 repo.wwrite('.hgsubstate',
73 ''.join(['%s %s\n' % (state[s][1], s)
73 ''.join(['%s %s\n' % (state[s][1], s)
74 for s in sorted(state)]), '')
74 for s in sorted(state)]), '')
75
75
76 def submerge(repo, wctx, mctx, actx):
76 def submerge(repo, wctx, mctx, actx):
77 """delegated from merge.applyupdates: merging of .hgsubstate file
77 """delegated from merge.applyupdates: merging of .hgsubstate file
78 in working context, merging context and ancestor context"""
78 in working context, merging context and ancestor context"""
79 if mctx == actx: # backwards?
79 if mctx == actx: # backwards?
80 actx = wctx.p1()
80 actx = wctx.p1()
81 s1 = wctx.substate
81 s1 = wctx.substate
82 s2 = mctx.substate
82 s2 = mctx.substate
83 sa = actx.substate
83 sa = actx.substate
84 sm = {}
84 sm = {}
85
85
86 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
86 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
87
87
88 def debug(s, msg, r=""):
88 def debug(s, msg, r=""):
89 if r:
89 if r:
90 r = "%s:%s:%s" % r
90 r = "%s:%s:%s" % r
91 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
91 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
92
92
93 for s, l in s1.items():
93 for s, l in s1.items():
94 a = sa.get(s, nullstate)
94 a = sa.get(s, nullstate)
95 ld = l # local state with possible dirty flag for compares
95 ld = l # local state with possible dirty flag for compares
96 if wctx.sub(s).dirty():
96 if wctx.sub(s).dirty():
97 ld = (l[0], l[1] + "+")
97 ld = (l[0], l[1] + "+")
98 if wctx == actx: # overwrite
98 if wctx == actx: # overwrite
99 a = ld
99 a = ld
100
100
101 if s in s2:
101 if s in s2:
102 r = s2[s]
102 r = s2[s]
103 if ld == r or r == a: # no change or local is newer
103 if ld == r or r == a: # no change or local is newer
104 sm[s] = l
104 sm[s] = l
105 continue
105 continue
106 elif ld == a: # other side changed
106 elif ld == a: # other side changed
107 debug(s, "other changed, get", r)
107 debug(s, "other changed, get", r)
108 wctx.sub(s).get(r)
108 wctx.sub(s).get(r)
109 sm[s] = r
109 sm[s] = r
110 elif ld[0] != r[0]: # sources differ
110 elif ld[0] != r[0]: # sources differ
111 if repo.ui.promptchoice(
111 if repo.ui.promptchoice(
112 _(' subrepository sources for %s differ\n'
112 _(' subrepository sources for %s differ\n'
113 'use (l)ocal source (%s) or (r)emote source (%s)?')
113 'use (l)ocal source (%s) or (r)emote source (%s)?')
114 % (s, l[0], r[0]),
114 % (s, l[0], r[0]),
115 (_('&Local'), _('&Remote')), 0):
115 (_('&Local'), _('&Remote')), 0):
116 debug(s, "prompt changed, get", r)
116 debug(s, "prompt changed, get", r)
117 wctx.sub(s).get(r)
117 wctx.sub(s).get(r)
118 sm[s] = r
118 sm[s] = r
119 elif ld[1] == a[1]: # local side is unchanged
119 elif ld[1] == a[1]: # local side is unchanged
120 debug(s, "other side changed, get", r)
120 debug(s, "other side changed, get", r)
121 wctx.sub(s).get(r)
121 wctx.sub(s).get(r)
122 sm[s] = r
122 sm[s] = r
123 else:
123 else:
124 debug(s, "both sides changed, merge with", r)
124 debug(s, "both sides changed, merge with", r)
125 wctx.sub(s).merge(r)
125 wctx.sub(s).merge(r)
126 sm[s] = l
126 sm[s] = l
127 elif ld == a: # remote removed, local unchanged
127 elif ld == a: # remote removed, local unchanged
128 debug(s, "remote removed, remove")
128 debug(s, "remote removed, remove")
129 wctx.sub(s).remove()
129 wctx.sub(s).remove()
130 else:
130 else:
131 if repo.ui.promptchoice(
131 if repo.ui.promptchoice(
132 _(' local changed subrepository %s which remote removed\n'
132 _(' local changed subrepository %s which remote removed\n'
133 'use (c)hanged version or (d)elete?') % s,
133 'use (c)hanged version or (d)elete?') % s,
134 (_('&Changed'), _('&Delete')), 0):
134 (_('&Changed'), _('&Delete')), 0):
135 debug(s, "prompt remove")
135 debug(s, "prompt remove")
136 wctx.sub(s).remove()
136 wctx.sub(s).remove()
137
137
138 for s, r in s2.items():
138 for s, r in s2.items():
139 if s in s1:
139 if s in s1:
140 continue
140 continue
141 elif s not in sa:
141 elif s not in sa:
142 debug(s, "remote added, get", r)
142 debug(s, "remote added, get", r)
143 mctx.sub(s).get(r)
143 mctx.sub(s).get(r)
144 sm[s] = r
144 sm[s] = r
145 elif r != sa[s]:
145 elif r != sa[s]:
146 if repo.ui.promptchoice(
146 if repo.ui.promptchoice(
147 _(' remote changed subrepository %s which local removed\n'
147 _(' remote changed subrepository %s which local removed\n'
148 'use (c)hanged version or (d)elete?') % s,
148 'use (c)hanged version or (d)elete?') % s,
149 (_('&Changed'), _('&Delete')), 0) == 0:
149 (_('&Changed'), _('&Delete')), 0) == 0:
150 debug(s, "prompt recreate", r)
150 debug(s, "prompt recreate", r)
151 wctx.sub(s).get(r)
151 wctx.sub(s).get(r)
152 sm[s] = r
152 sm[s] = r
153
153
154 # record merged .hgsubstate
154 # record merged .hgsubstate
155 writestate(repo, sm)
155 writestate(repo, sm)
156
156
157 def relpath(sub):
157 def relpath(sub):
158 """return path to this subrepo as seen from outermost repo"""
158 """return path to this subrepo as seen from outermost repo"""
159 if not hasattr(sub, '_repo'):
159 if not hasattr(sub, '_repo'):
160 return sub._path
160 return sub._path
161 parent = sub._repo
161 parent = sub._repo
162 while hasattr(parent, '_subparent'):
162 while hasattr(parent, '_subparent'):
163 parent = parent._subparent
163 parent = parent._subparent
164 return sub._repo.root[len(parent.root)+1:]
164 return sub._repo.root[len(parent.root)+1:]
165
165
166 def _abssource(repo, push=False):
166 def _abssource(repo, push=False):
167 """return pull/push path of repo - either based on parent repo
167 """return pull/push path of repo - either based on parent repo
168 .hgsub info or on the subrepos own config"""
168 .hgsub info or on the subrepos own config"""
169 if hasattr(repo, '_subparent'):
169 if hasattr(repo, '_subparent'):
170 source = repo._subsource
170 source = repo._subsource
171 if source.startswith('/') or '://' in source:
171 if source.startswith('/') or '://' in source:
172 return source
172 return source
173 parent = _abssource(repo._subparent, push)
173 parent = _abssource(repo._subparent, push)
174 if '://' in parent:
174 if '://' in parent:
175 if parent[-1] == '/':
175 if parent[-1] == '/':
176 parent = parent[:-1]
176 parent = parent[:-1]
177 r = urlparse.urlparse(parent + '/' + source)
177 r = urlparse.urlparse(parent + '/' + source)
178 r = urlparse.urlunparse((r[0], r[1],
178 r = urlparse.urlunparse((r[0], r[1],
179 posixpath.normpath(r[2]),
179 posixpath.normpath(r[2]),
180 r[3], r[4], r[5]))
180 r[3], r[4], r[5]))
181 return r
181 return r
182 return posixpath.normpath(os.path.join(parent, repo._subsource))
182 return posixpath.normpath(os.path.join(parent, repo._subsource))
183 if push and repo.ui.config('paths', 'default-push'):
183 if push and repo.ui.config('paths', 'default-push'):
184 return repo.ui.config('paths', 'default-push', repo.root)
184 return repo.ui.config('paths', 'default-push', repo.root)
185 return repo.ui.config('paths', 'default', repo.root)
185 return repo.ui.config('paths', 'default', repo.root)
186
186
187 def subrepo(ctx, path):
187 def subrepo(ctx, path):
188 """return instance of the right subrepo class for subrepo in path"""
188 """return instance of the right subrepo class for subrepo in path"""
189 # subrepo inherently violates our import layering rules
189 # subrepo inherently violates our import layering rules
190 # because it wants to make repo objects from deep inside the stack
190 # because it wants to make repo objects from deep inside the stack
191 # so we manually delay the circular imports to not break
191 # so we manually delay the circular imports to not break
192 # scripts that don't use our demand-loading
192 # scripts that don't use our demand-loading
193 global hg
193 global hg
194 import hg as h
194 import hg as h
195 hg = h
195 hg = h
196
196
197 util.path_auditor(ctx._repo.root)(path)
197 util.path_auditor(ctx._repo.root)(path)
198 state = ctx.substate.get(path, nullstate)
198 state = ctx.substate.get(path, nullstate)
199 if state[2] not in types:
199 if state[2] not in types:
200 raise util.Abort(_('unknown subrepo type %s') % state[2])
200 raise util.Abort(_('unknown subrepo type %s') % state[2])
201 return types[state[2]](ctx, path, state[:2])
201 return types[state[2]](ctx, path, state[:2])
202
202
203 # subrepo classes need to implement the following abstract class:
203 # subrepo classes need to implement the following abstract class:
204
204
205 class abstractsubrepo(object):
205 class abstractsubrepo(object):
206
206
207 def dirty(self):
207 def dirty(self):
208 """returns true if the dirstate of the subrepo does not match
208 """returns true if the dirstate of the subrepo does not match
209 current stored state
209 current stored state
210 """
210 """
211 raise NotImplementedError
211 raise NotImplementedError
212
212
213 def checknested(path):
214 """check if path is a subrepository within this repository"""
215 return False
216
213 def commit(self, text, user, date):
217 def commit(self, text, user, date):
214 """commit the current changes to the subrepo with the given
218 """commit the current changes to the subrepo with the given
215 log message. Use given user and date if possible. Return the
219 log message. Use given user and date if possible. Return the
216 new state of the subrepo.
220 new state of the subrepo.
217 """
221 """
218 raise NotImplementedError
222 raise NotImplementedError
219
223
220 def remove(self):
224 def remove(self):
221 """remove the subrepo
225 """remove the subrepo
222
226
223 (should verify the dirstate is not dirty first)
227 (should verify the dirstate is not dirty first)
224 """
228 """
225 raise NotImplementedError
229 raise NotImplementedError
226
230
227 def get(self, state):
231 def get(self, state):
228 """run whatever commands are needed to put the subrepo into
232 """run whatever commands are needed to put the subrepo into
229 this state
233 this state
230 """
234 """
231 raise NotImplementedError
235 raise NotImplementedError
232
236
233 def merge(self, state):
237 def merge(self, state):
234 """merge currently-saved state with the new state."""
238 """merge currently-saved state with the new state."""
235 raise NotImplementedError
239 raise NotImplementedError
236
240
237 def push(self, force):
241 def push(self, force):
238 """perform whatever action is analogous to 'hg push'
242 """perform whatever action is analogous to 'hg push'
239
243
240 This may be a no-op on some systems.
244 This may be a no-op on some systems.
241 """
245 """
242 raise NotImplementedError
246 raise NotImplementedError
243
247
244
248
245 class hgsubrepo(abstractsubrepo):
249 class hgsubrepo(abstractsubrepo):
246 def __init__(self, ctx, path, state):
250 def __init__(self, ctx, path, state):
247 self._path = path
251 self._path = path
248 self._state = state
252 self._state = state
249 r = ctx._repo
253 r = ctx._repo
250 root = r.wjoin(path)
254 root = r.wjoin(path)
251 create = False
255 create = False
252 if not os.path.exists(os.path.join(root, '.hg')):
256 if not os.path.exists(os.path.join(root, '.hg')):
253 create = True
257 create = True
254 util.makedirs(root)
258 util.makedirs(root)
255 self._repo = hg.repository(r.ui, root, create=create)
259 self._repo = hg.repository(r.ui, root, create=create)
256 self._repo._subparent = r
260 self._repo._subparent = r
257 self._repo._subsource = state[0]
261 self._repo._subsource = state[0]
258
262
259 if create:
263 if create:
260 fp = self._repo.opener("hgrc", "w", text=True)
264 fp = self._repo.opener("hgrc", "w", text=True)
261 fp.write('[paths]\n')
265 fp.write('[paths]\n')
262
266
263 def addpathconfig(key, value):
267 def addpathconfig(key, value):
264 fp.write('%s = %s\n' % (key, value))
268 fp.write('%s = %s\n' % (key, value))
265 self._repo.ui.setconfig('paths', key, value)
269 self._repo.ui.setconfig('paths', key, value)
266
270
267 defpath = _abssource(self._repo)
271 defpath = _abssource(self._repo)
268 defpushpath = _abssource(self._repo, True)
272 defpushpath = _abssource(self._repo, True)
269 addpathconfig('default', defpath)
273 addpathconfig('default', defpath)
270 if defpath != defpushpath:
274 if defpath != defpushpath:
271 addpathconfig('default-push', defpushpath)
275 addpathconfig('default-push', defpushpath)
272 fp.close()
276 fp.close()
273
277
274 def dirty(self):
278 def dirty(self):
275 r = self._state[1]
279 r = self._state[1]
276 if r == '':
280 if r == '':
277 return True
281 return True
278 w = self._repo[None]
282 w = self._repo[None]
279 if w.p1() != self._repo[r]: # version checked out change
283 if w.p1() != self._repo[r]: # version checked out change
280 return True
284 return True
281 return w.dirty() # working directory changed
285 return w.dirty() # working directory changed
282
286
287 def checknested(self, path):
288 return self._repo._checknested(self._repo.wjoin(path))
289
283 def commit(self, text, user, date):
290 def commit(self, text, user, date):
284 self._repo.ui.debug("committing subrepo %s\n" % relpath(self))
291 self._repo.ui.debug("committing subrepo %s\n" % relpath(self))
285 n = self._repo.commit(text, user, date)
292 n = self._repo.commit(text, user, date)
286 if not n:
293 if not n:
287 return self._repo['.'].hex() # different version checked out
294 return self._repo['.'].hex() # different version checked out
288 return node.hex(n)
295 return node.hex(n)
289
296
290 def remove(self):
297 def remove(self):
291 # we can't fully delete the repository as it may contain
298 # we can't fully delete the repository as it may contain
292 # local-only history
299 # local-only history
293 self._repo.ui.note(_('removing subrepo %s\n') % relpath(self))
300 self._repo.ui.note(_('removing subrepo %s\n') % relpath(self))
294 hg.clean(self._repo, node.nullid, False)
301 hg.clean(self._repo, node.nullid, False)
295
302
296 def _get(self, state):
303 def _get(self, state):
297 source, revision, kind = state
304 source, revision, kind = state
298 try:
305 try:
299 self._repo.lookup(revision)
306 self._repo.lookup(revision)
300 except error.RepoError:
307 except error.RepoError:
301 self._repo._subsource = source
308 self._repo._subsource = source
302 srcurl = _abssource(self._repo)
309 srcurl = _abssource(self._repo)
303 self._repo.ui.status(_('pulling subrepo %s from %s\n')
310 self._repo.ui.status(_('pulling subrepo %s from %s\n')
304 % (relpath(self), srcurl))
311 % (relpath(self), srcurl))
305 other = hg.repository(self._repo.ui, srcurl)
312 other = hg.repository(self._repo.ui, srcurl)
306 self._repo.pull(other)
313 self._repo.pull(other)
307
314
308 def get(self, state):
315 def get(self, state):
309 self._get(state)
316 self._get(state)
310 source, revision, kind = state
317 source, revision, kind = state
311 self._repo.ui.debug("getting subrepo %s\n" % self._path)
318 self._repo.ui.debug("getting subrepo %s\n" % self._path)
312 hg.clean(self._repo, revision, False)
319 hg.clean(self._repo, revision, False)
313
320
314 def merge(self, state):
321 def merge(self, state):
315 self._get(state)
322 self._get(state)
316 cur = self._repo['.']
323 cur = self._repo['.']
317 dst = self._repo[state[1]]
324 dst = self._repo[state[1]]
318 anc = dst.ancestor(cur)
325 anc = dst.ancestor(cur)
319 if anc == cur:
326 if anc == cur:
320 self._repo.ui.debug("updating subrepo %s\n" % relpath(self))
327 self._repo.ui.debug("updating subrepo %s\n" % relpath(self))
321 hg.update(self._repo, state[1])
328 hg.update(self._repo, state[1])
322 elif anc == dst:
329 elif anc == dst:
323 self._repo.ui.debug("skipping subrepo %s\n" % relpath(self))
330 self._repo.ui.debug("skipping subrepo %s\n" % relpath(self))
324 else:
331 else:
325 self._repo.ui.debug("merging subrepo %s\n" % relpath(self))
332 self._repo.ui.debug("merging subrepo %s\n" % relpath(self))
326 hg.merge(self._repo, state[1], remind=False)
333 hg.merge(self._repo, state[1], remind=False)
327
334
328 def push(self, force):
335 def push(self, force):
329 # push subrepos depth-first for coherent ordering
336 # push subrepos depth-first for coherent ordering
330 c = self._repo['']
337 c = self._repo['']
331 subs = c.substate # only repos that are committed
338 subs = c.substate # only repos that are committed
332 for s in sorted(subs):
339 for s in sorted(subs):
333 if not c.sub(s).push(force):
340 if not c.sub(s).push(force):
334 return False
341 return False
335
342
336 dsturl = _abssource(self._repo, True)
343 dsturl = _abssource(self._repo, True)
337 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
344 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
338 (relpath(self), dsturl))
345 (relpath(self), dsturl))
339 other = hg.repository(self._repo.ui, dsturl)
346 other = hg.repository(self._repo.ui, dsturl)
340 return self._repo.push(other, force)
347 return self._repo.push(other, force)
341
348
342 class svnsubrepo(abstractsubrepo):
349 class svnsubrepo(abstractsubrepo):
343 def __init__(self, ctx, path, state):
350 def __init__(self, ctx, path, state):
344 self._path = path
351 self._path = path
345 self._state = state
352 self._state = state
346 self._ctx = ctx
353 self._ctx = ctx
347 self._ui = ctx._repo.ui
354 self._ui = ctx._repo.ui
348
355
349 def _svncommand(self, commands, filename=''):
356 def _svncommand(self, commands, filename=''):
350 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
357 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
351 cmd = ['svn'] + commands + [path]
358 cmd = ['svn'] + commands + [path]
352 cmd = [util.shellquote(arg) for arg in cmd]
359 cmd = [util.shellquote(arg) for arg in cmd]
353 cmd = util.quotecommand(' '.join(cmd))
360 cmd = util.quotecommand(' '.join(cmd))
354 env = dict(os.environ)
361 env = dict(os.environ)
355 # Avoid localized output, preserve current locale for everything else.
362 # Avoid localized output, preserve current locale for everything else.
356 env['LC_MESSAGES'] = 'C'
363 env['LC_MESSAGES'] = 'C'
357 write, read, err = util.popen3(cmd, env=env, newlines=True)
364 write, read, err = util.popen3(cmd, env=env, newlines=True)
358 retdata = read.read()
365 retdata = read.read()
359 err = err.read().strip()
366 err = err.read().strip()
360 if err:
367 if err:
361 raise util.Abort(err)
368 raise util.Abort(err)
362 return retdata
369 return retdata
363
370
364 def _wcrev(self):
371 def _wcrev(self):
365 output = self._svncommand(['info', '--xml'])
372 output = self._svncommand(['info', '--xml'])
366 doc = xml.dom.minidom.parseString(output)
373 doc = xml.dom.minidom.parseString(output)
367 entries = doc.getElementsByTagName('entry')
374 entries = doc.getElementsByTagName('entry')
368 if not entries:
375 if not entries:
369 return 0
376 return 0
370 return int(entries[0].getAttribute('revision') or 0)
377 return int(entries[0].getAttribute('revision') or 0)
371
378
372 def _wcchanged(self):
379 def _wcchanged(self):
373 """Return (changes, extchanges) where changes is True
380 """Return (changes, extchanges) where changes is True
374 if the working directory was changed, and extchanges is
381 if the working directory was changed, and extchanges is
375 True if any of these changes concern an external entry.
382 True if any of these changes concern an external entry.
376 """
383 """
377 output = self._svncommand(['status', '--xml'])
384 output = self._svncommand(['status', '--xml'])
378 externals, changes = [], []
385 externals, changes = [], []
379 doc = xml.dom.minidom.parseString(output)
386 doc = xml.dom.minidom.parseString(output)
380 for e in doc.getElementsByTagName('entry'):
387 for e in doc.getElementsByTagName('entry'):
381 s = e.getElementsByTagName('wc-status')
388 s = e.getElementsByTagName('wc-status')
382 if not s:
389 if not s:
383 continue
390 continue
384 item = s[0].getAttribute('item')
391 item = s[0].getAttribute('item')
385 props = s[0].getAttribute('props')
392 props = s[0].getAttribute('props')
386 path = e.getAttribute('path')
393 path = e.getAttribute('path')
387 if item == 'external':
394 if item == 'external':
388 externals.append(path)
395 externals.append(path)
389 if (item not in ('', 'normal', 'unversioned', 'external')
396 if (item not in ('', 'normal', 'unversioned', 'external')
390 or props not in ('', 'none')):
397 or props not in ('', 'none')):
391 changes.append(path)
398 changes.append(path)
392 for path in changes:
399 for path in changes:
393 for ext in externals:
400 for ext in externals:
394 if path == ext or path.startswith(ext + os.sep):
401 if path == ext or path.startswith(ext + os.sep):
395 return True, True
402 return True, True
396 return bool(changes), False
403 return bool(changes), False
397
404
398 def dirty(self):
405 def dirty(self):
399 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
406 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
400 return False
407 return False
401 return True
408 return True
402
409
403 def commit(self, text, user, date):
410 def commit(self, text, user, date):
404 # user and date are out of our hands since svn is centralized
411 # user and date are out of our hands since svn is centralized
405 changed, extchanged = self._wcchanged()
412 changed, extchanged = self._wcchanged()
406 if not changed:
413 if not changed:
407 return self._wcrev()
414 return self._wcrev()
408 if extchanged:
415 if extchanged:
409 # Do not try to commit externals
416 # Do not try to commit externals
410 raise util.Abort(_('cannot commit svn externals'))
417 raise util.Abort(_('cannot commit svn externals'))
411 commitinfo = self._svncommand(['commit', '-m', text])
418 commitinfo = self._svncommand(['commit', '-m', text])
412 self._ui.status(commitinfo)
419 self._ui.status(commitinfo)
413 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
420 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
414 if not newrev:
421 if not newrev:
415 raise util.Abort(commitinfo.splitlines()[-1])
422 raise util.Abort(commitinfo.splitlines()[-1])
416 newrev = newrev.groups()[0]
423 newrev = newrev.groups()[0]
417 self._ui.status(self._svncommand(['update', '-r', newrev]))
424 self._ui.status(self._svncommand(['update', '-r', newrev]))
418 return newrev
425 return newrev
419
426
420 def remove(self):
427 def remove(self):
421 if self.dirty():
428 if self.dirty():
422 self._ui.warn(_('not removing repo %s because '
429 self._ui.warn(_('not removing repo %s because '
423 'it has changes.\n' % self._path))
430 'it has changes.\n' % self._path))
424 return
431 return
425 self._ui.note(_('removing subrepo %s\n') % self._path)
432 self._ui.note(_('removing subrepo %s\n') % self._path)
426 shutil.rmtree(self._ctx.repo.join(self._path))
433 shutil.rmtree(self._ctx.repo.join(self._path))
427
434
428 def get(self, state):
435 def get(self, state):
429 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
436 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
430 if not re.search('Checked out revision [0-9]+.', status):
437 if not re.search('Checked out revision [0-9]+.', status):
431 raise util.Abort(status.splitlines()[-1])
438 raise util.Abort(status.splitlines()[-1])
432 self._ui.status(status)
439 self._ui.status(status)
433
440
434 def merge(self, state):
441 def merge(self, state):
435 old = int(self._state[1])
442 old = int(self._state[1])
436 new = int(state[1])
443 new = int(state[1])
437 if new > old:
444 if new > old:
438 self.get(state)
445 self.get(state)
439
446
440 def push(self, force):
447 def push(self, force):
441 # push is a no-op for SVN
448 # push is a no-op for SVN
442 return True
449 return True
443
450
444 types = {
451 types = {
445 'hg': hgsubrepo,
452 'hg': hgsubrepo,
446 'svn': svnsubrepo,
453 'svn': svnsubrepo,
447 }
454 }
General Comments 0
You need to be logged in to leave comments. Login now