##// END OF EJS Templates
subrepo: backout f02d7a562a21...
Erik Zielke -
r13172:84cec589 default
parent child Browse files
Show More
@@ -1,1951 +1,1938 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164
164
165 @propertycache
165 @propertycache
166 def changelog(self):
166 def changelog(self):
167 c = changelog.changelog(self.sopener)
167 c = changelog.changelog(self.sopener)
168 if 'HG_PENDING' in os.environ:
168 if 'HG_PENDING' in os.environ:
169 p = os.environ['HG_PENDING']
169 p = os.environ['HG_PENDING']
170 if p.startswith(self.root):
170 if p.startswith(self.root):
171 c.readpending('00changelog.i.a')
171 c.readpending('00changelog.i.a')
172 self.sopener.options['defversion'] = c.version
172 self.sopener.options['defversion'] = c.version
173 return c
173 return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 warned = [0]
181 warned = [0]
182 def validate(node):
182 def validate(node):
183 try:
183 try:
184 r = self.changelog.rev(node)
184 r = self.changelog.rev(node)
185 return node
185 return node
186 except error.LookupError:
186 except error.LookupError:
187 if not warned[0]:
187 if not warned[0]:
188 warned[0] = True
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n") % short(node))
190 " working parent %s!\n") % short(node))
191 return nullid
191 return nullid
192
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194
194
195 def __getitem__(self, changeid):
195 def __getitem__(self, changeid):
196 if changeid is None:
196 if changeid is None:
197 return context.workingctx(self)
197 return context.workingctx(self)
198 return context.changectx(self, changeid)
198 return context.changectx(self, changeid)
199
199
200 def __contains__(self, changeid):
200 def __contains__(self, changeid):
201 try:
201 try:
202 return bool(self.lookup(changeid))
202 return bool(self.lookup(changeid))
203 except error.RepoLookupError:
203 except error.RepoLookupError:
204 return False
204 return False
205
205
206 def __nonzero__(self):
206 def __nonzero__(self):
207 return True
207 return True
208
208
209 def __len__(self):
209 def __len__(self):
210 return len(self.changelog)
210 return len(self.changelog)
211
211
212 def __iter__(self):
212 def __iter__(self):
213 for i in xrange(len(self)):
213 for i in xrange(len(self)):
214 yield i
214 yield i
215
215
216 def url(self):
216 def url(self):
217 return 'file:' + self.root
217 return 'file:' + self.root
218
218
219 def hook(self, name, throw=False, **args):
219 def hook(self, name, throw=False, **args):
220 return hook.hook(self.ui, self, name, throw, **args)
220 return hook.hook(self.ui, self, name, throw, **args)
221
221
222 tag_disallowed = ':\r\n'
222 tag_disallowed = ':\r\n'
223
223
224 def _tag(self, names, node, message, local, user, date, extra={}):
224 def _tag(self, names, node, message, local, user, date, extra={}):
225 if isinstance(names, str):
225 if isinstance(names, str):
226 allchars = names
226 allchars = names
227 names = (names,)
227 names = (names,)
228 else:
228 else:
229 allchars = ''.join(names)
229 allchars = ''.join(names)
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in allchars:
231 if c in allchars:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 branches = self.branchmap()
234 branches = self.branchmap()
235 for name in names:
235 for name in names:
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 local=local)
237 local=local)
238 if name in branches:
238 if name in branches:
239 self.ui.warn(_("warning: tag %s conflicts with existing"
239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 " branch name\n") % name)
240 " branch name\n") % name)
241
241
242 def writetags(fp, names, munge, prevtags):
242 def writetags(fp, names, munge, prevtags):
243 fp.seek(0, 2)
243 fp.seek(0, 2)
244 if prevtags and prevtags[-1] != '\n':
244 if prevtags and prevtags[-1] != '\n':
245 fp.write('\n')
245 fp.write('\n')
246 for name in names:
246 for name in names:
247 m = munge and munge(name) or name
247 m = munge and munge(name) or name
248 if self._tagtypes and name in self._tagtypes:
248 if self._tagtypes and name in self._tagtypes:
249 old = self._tags.get(name, nullid)
249 old = self._tags.get(name, nullid)
250 fp.write('%s %s\n' % (hex(old), m))
250 fp.write('%s %s\n' % (hex(old), m))
251 fp.write('%s %s\n' % (hex(node), m))
251 fp.write('%s %s\n' % (hex(node), m))
252 fp.close()
252 fp.close()
253
253
254 prevtags = ''
254 prevtags = ''
255 if local:
255 if local:
256 try:
256 try:
257 fp = self.opener('localtags', 'r+')
257 fp = self.opener('localtags', 'r+')
258 except IOError:
258 except IOError:
259 fp = self.opener('localtags', 'a')
259 fp = self.opener('localtags', 'a')
260 else:
260 else:
261 prevtags = fp.read()
261 prevtags = fp.read()
262
262
263 # local tags are stored in the current charset
263 # local tags are stored in the current charset
264 writetags(fp, names, None, prevtags)
264 writetags(fp, names, None, prevtags)
265 for name in names:
265 for name in names:
266 self.hook('tag', node=hex(node), tag=name, local=local)
266 self.hook('tag', node=hex(node), tag=name, local=local)
267 return
267 return
268
268
269 try:
269 try:
270 fp = self.wfile('.hgtags', 'rb+')
270 fp = self.wfile('.hgtags', 'rb+')
271 except IOError:
271 except IOError:
272 fp = self.wfile('.hgtags', 'ab')
272 fp = self.wfile('.hgtags', 'ab')
273 else:
273 else:
274 prevtags = fp.read()
274 prevtags = fp.read()
275
275
276 # committed tags are stored in UTF-8
276 # committed tags are stored in UTF-8
277 writetags(fp, names, encoding.fromlocal, prevtags)
277 writetags(fp, names, encoding.fromlocal, prevtags)
278
278
279 if '.hgtags' not in self.dirstate:
279 if '.hgtags' not in self.dirstate:
280 self[None].add(['.hgtags'])
280 self[None].add(['.hgtags'])
281
281
282 m = matchmod.exact(self.root, '', ['.hgtags'])
282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284
284
285 for name in names:
285 for name in names:
286 self.hook('tag', node=hex(node), tag=name, local=local)
286 self.hook('tag', node=hex(node), tag=name, local=local)
287
287
288 return tagnode
288 return tagnode
289
289
290 def tag(self, names, node, message, local, user, date):
290 def tag(self, names, node, message, local, user, date):
291 '''tag a revision with one or more symbolic names.
291 '''tag a revision with one or more symbolic names.
292
292
293 names is a list of strings or, when adding a single tag, names may be a
293 names is a list of strings or, when adding a single tag, names may be a
294 string.
294 string.
295
295
296 if local is True, the tags are stored in a per-repository file.
296 if local is True, the tags are stored in a per-repository file.
297 otherwise, they are stored in the .hgtags file, and a new
297 otherwise, they are stored in the .hgtags file, and a new
298 changeset is committed with the change.
298 changeset is committed with the change.
299
299
300 keyword arguments:
300 keyword arguments:
301
301
302 local: whether to store tags in non-version-controlled file
302 local: whether to store tags in non-version-controlled file
303 (default False)
303 (default False)
304
304
305 message: commit message to use if committing
305 message: commit message to use if committing
306
306
307 user: name of user to use if committing
307 user: name of user to use if committing
308
308
309 date: date tuple to use if committing'''
309 date: date tuple to use if committing'''
310
310
311 if not local:
311 if not local:
312 for x in self.status()[:5]:
312 for x in self.status()[:5]:
313 if '.hgtags' in x:
313 if '.hgtags' in x:
314 raise util.Abort(_('working copy of .hgtags is changed '
314 raise util.Abort(_('working copy of .hgtags is changed '
315 '(please commit .hgtags manually)'))
315 '(please commit .hgtags manually)'))
316
316
317 self.tags() # instantiate the cache
317 self.tags() # instantiate the cache
318 self._tag(names, node, message, local, user, date)
318 self._tag(names, node, message, local, user, date)
319
319
320 def tags(self):
320 def tags(self):
321 '''return a mapping of tag to node'''
321 '''return a mapping of tag to node'''
322 if self._tags is None:
322 if self._tags is None:
323 (self._tags, self._tagtypes) = self._findtags()
323 (self._tags, self._tagtypes) = self._findtags()
324
324
325 return self._tags
325 return self._tags
326
326
327 def _findtags(self):
327 def _findtags(self):
328 '''Do the hard work of finding tags. Return a pair of dicts
328 '''Do the hard work of finding tags. Return a pair of dicts
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 maps tag name to a string like \'global\' or \'local\'.
330 maps tag name to a string like \'global\' or \'local\'.
331 Subclasses or extensions are free to add their own tags, but
331 Subclasses or extensions are free to add their own tags, but
332 should be aware that the returned dicts will be retained for the
332 should be aware that the returned dicts will be retained for the
333 duration of the localrepo object.'''
333 duration of the localrepo object.'''
334
334
335 # XXX what tagtype should subclasses/extensions use? Currently
335 # XXX what tagtype should subclasses/extensions use? Currently
336 # mq and bookmarks add tags, but do not set the tagtype at all.
336 # mq and bookmarks add tags, but do not set the tagtype at all.
337 # Should each extension invent its own tag type? Should there
337 # Should each extension invent its own tag type? Should there
338 # be one tagtype for all such "virtual" tags? Or is the status
338 # be one tagtype for all such "virtual" tags? Or is the status
339 # quo fine?
339 # quo fine?
340
340
341 alltags = {} # map tag name to (node, hist)
341 alltags = {} # map tag name to (node, hist)
342 tagtypes = {}
342 tagtypes = {}
343
343
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346
346
347 # Build the return dicts. Have to re-encode tag names because
347 # Build the return dicts. Have to re-encode tag names because
348 # the tags module always uses UTF-8 (in order not to lose info
348 # the tags module always uses UTF-8 (in order not to lose info
349 # writing to the cache), but the rest of Mercurial wants them in
349 # writing to the cache), but the rest of Mercurial wants them in
350 # local encoding.
350 # local encoding.
351 tags = {}
351 tags = {}
352 for (name, (node, hist)) in alltags.iteritems():
352 for (name, (node, hist)) in alltags.iteritems():
353 if node != nullid:
353 if node != nullid:
354 tags[encoding.tolocal(name)] = node
354 tags[encoding.tolocal(name)] = node
355 tags['tip'] = self.changelog.tip()
355 tags['tip'] = self.changelog.tip()
356 tagtypes = dict([(encoding.tolocal(name), value)
356 tagtypes = dict([(encoding.tolocal(name), value)
357 for (name, value) in tagtypes.iteritems()])
357 for (name, value) in tagtypes.iteritems()])
358 return (tags, tagtypes)
358 return (tags, tagtypes)
359
359
360 def tagtype(self, tagname):
360 def tagtype(self, tagname):
361 '''
361 '''
362 return the type of the given tag. result can be:
362 return the type of the given tag. result can be:
363
363
364 'local' : a local tag
364 'local' : a local tag
365 'global' : a global tag
365 'global' : a global tag
366 None : tag does not exist
366 None : tag does not exist
367 '''
367 '''
368
368
369 self.tags()
369 self.tags()
370
370
371 return self._tagtypes.get(tagname)
371 return self._tagtypes.get(tagname)
372
372
373 def tagslist(self):
373 def tagslist(self):
374 '''return a list of tags ordered by revision'''
374 '''return a list of tags ordered by revision'''
375 l = []
375 l = []
376 for t, n in self.tags().iteritems():
376 for t, n in self.tags().iteritems():
377 try:
377 try:
378 r = self.changelog.rev(n)
378 r = self.changelog.rev(n)
379 except:
379 except:
380 r = -2 # sort to the beginning of the list if unknown
380 r = -2 # sort to the beginning of the list if unknown
381 l.append((r, t, n))
381 l.append((r, t, n))
382 return [(t, n) for r, t, n in sorted(l)]
382 return [(t, n) for r, t, n in sorted(l)]
383
383
384 def nodetags(self, node):
384 def nodetags(self, node):
385 '''return the tags associated with a node'''
385 '''return the tags associated with a node'''
386 if not self.nodetagscache:
386 if not self.nodetagscache:
387 self.nodetagscache = {}
387 self.nodetagscache = {}
388 for t, n in self.tags().iteritems():
388 for t, n in self.tags().iteritems():
389 self.nodetagscache.setdefault(n, []).append(t)
389 self.nodetagscache.setdefault(n, []).append(t)
390 for tags in self.nodetagscache.itervalues():
390 for tags in self.nodetagscache.itervalues():
391 tags.sort()
391 tags.sort()
392 return self.nodetagscache.get(node, [])
392 return self.nodetagscache.get(node, [])
393
393
394 def _branchtags(self, partial, lrev):
394 def _branchtags(self, partial, lrev):
395 # TODO: rename this function?
395 # TODO: rename this function?
396 tiprev = len(self) - 1
396 tiprev = len(self) - 1
397 if lrev != tiprev:
397 if lrev != tiprev:
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 self._updatebranchcache(partial, ctxgen)
399 self._updatebranchcache(partial, ctxgen)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401
401
402 return partial
402 return partial
403
403
404 def updatebranchcache(self):
404 def updatebranchcache(self):
405 tip = self.changelog.tip()
405 tip = self.changelog.tip()
406 if self._branchcache is not None and self._branchcachetip == tip:
406 if self._branchcache is not None and self._branchcachetip == tip:
407 return self._branchcache
407 return self._branchcache
408
408
409 oldtip = self._branchcachetip
409 oldtip = self._branchcachetip
410 self._branchcachetip = tip
410 self._branchcachetip = tip
411 if oldtip is None or oldtip not in self.changelog.nodemap:
411 if oldtip is None or oldtip not in self.changelog.nodemap:
412 partial, last, lrev = self._readbranchcache()
412 partial, last, lrev = self._readbranchcache()
413 else:
413 else:
414 lrev = self.changelog.rev(oldtip)
414 lrev = self.changelog.rev(oldtip)
415 partial = self._branchcache
415 partial = self._branchcache
416
416
417 self._branchtags(partial, lrev)
417 self._branchtags(partial, lrev)
418 # this private cache holds all heads (not just tips)
418 # this private cache holds all heads (not just tips)
419 self._branchcache = partial
419 self._branchcache = partial
420
420
421 def branchmap(self):
421 def branchmap(self):
422 '''returns a dictionary {branch: [branchheads]}'''
422 '''returns a dictionary {branch: [branchheads]}'''
423 self.updatebranchcache()
423 self.updatebranchcache()
424 return self._branchcache
424 return self._branchcache
425
425
426 def branchtags(self):
426 def branchtags(self):
427 '''return a dict where branch names map to the tipmost head of
427 '''return a dict where branch names map to the tipmost head of
428 the branch, open heads come before closed'''
428 the branch, open heads come before closed'''
429 bt = {}
429 bt = {}
430 for bn, heads in self.branchmap().iteritems():
430 for bn, heads in self.branchmap().iteritems():
431 tip = heads[-1]
431 tip = heads[-1]
432 for h in reversed(heads):
432 for h in reversed(heads):
433 if 'close' not in self.changelog.read(h)[5]:
433 if 'close' not in self.changelog.read(h)[5]:
434 tip = h
434 tip = h
435 break
435 break
436 bt[bn] = tip
436 bt[bn] = tip
437 return bt
437 return bt
438
438
439 def _readbranchcache(self):
439 def _readbranchcache(self):
440 partial = {}
440 partial = {}
441 try:
441 try:
442 f = self.opener("branchheads.cache")
442 f = self.opener("branchheads.cache")
443 lines = f.read().split('\n')
443 lines = f.read().split('\n')
444 f.close()
444 f.close()
445 except (IOError, OSError):
445 except (IOError, OSError):
446 return {}, nullid, nullrev
446 return {}, nullid, nullrev
447
447
448 try:
448 try:
449 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = lines.pop(0).split(" ", 1)
450 last, lrev = bin(last), int(lrev)
450 last, lrev = bin(last), int(lrev)
451 if lrev >= len(self) or self[lrev].node() != last:
451 if lrev >= len(self) or self[lrev].node() != last:
452 # invalidate the cache
452 # invalidate the cache
453 raise ValueError('invalidating branch cache (tip differs)')
453 raise ValueError('invalidating branch cache (tip differs)')
454 for l in lines:
454 for l in lines:
455 if not l:
455 if not l:
456 continue
456 continue
457 node, label = l.split(" ", 1)
457 node, label = l.split(" ", 1)
458 label = encoding.tolocal(label.strip())
458 label = encoding.tolocal(label.strip())
459 partial.setdefault(label, []).append(bin(node))
459 partial.setdefault(label, []).append(bin(node))
460 except KeyboardInterrupt:
460 except KeyboardInterrupt:
461 raise
461 raise
462 except Exception, inst:
462 except Exception, inst:
463 if self.ui.debugflag:
463 if self.ui.debugflag:
464 self.ui.warn(str(inst), '\n')
464 self.ui.warn(str(inst), '\n')
465 partial, last, lrev = {}, nullid, nullrev
465 partial, last, lrev = {}, nullid, nullrev
466 return partial, last, lrev
466 return partial, last, lrev
467
467
468 def _writebranchcache(self, branches, tip, tiprev):
468 def _writebranchcache(self, branches, tip, tiprev):
469 try:
469 try:
470 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 f = self.opener("branchheads.cache", "w", atomictemp=True)
471 f.write("%s %s\n" % (hex(tip), tiprev))
471 f.write("%s %s\n" % (hex(tip), tiprev))
472 for label, nodes in branches.iteritems():
472 for label, nodes in branches.iteritems():
473 for node in nodes:
473 for node in nodes:
474 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
474 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 f.rename()
475 f.rename()
476 except (IOError, OSError):
476 except (IOError, OSError):
477 pass
477 pass
478
478
479 def _updatebranchcache(self, partial, ctxgen):
479 def _updatebranchcache(self, partial, ctxgen):
480 # collect new branch entries
480 # collect new branch entries
481 newbranches = {}
481 newbranches = {}
482 for c in ctxgen:
482 for c in ctxgen:
483 newbranches.setdefault(c.branch(), []).append(c.node())
483 newbranches.setdefault(c.branch(), []).append(c.node())
484 # if older branchheads are reachable from new ones, they aren't
484 # if older branchheads are reachable from new ones, they aren't
485 # really branchheads. Note checking parents is insufficient:
485 # really branchheads. Note checking parents is insufficient:
486 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
486 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 for branch, newnodes in newbranches.iteritems():
487 for branch, newnodes in newbranches.iteritems():
488 bheads = partial.setdefault(branch, [])
488 bheads = partial.setdefault(branch, [])
489 bheads.extend(newnodes)
489 bheads.extend(newnodes)
490 if len(bheads) <= 1:
490 if len(bheads) <= 1:
491 continue
491 continue
492 # starting from tip means fewer passes over reachable
492 # starting from tip means fewer passes over reachable
493 while newnodes:
493 while newnodes:
494 latest = newnodes.pop()
494 latest = newnodes.pop()
495 if latest not in bheads:
495 if latest not in bheads:
496 continue
496 continue
497 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
497 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 reachable = self.changelog.reachable(latest, minbhrev)
498 reachable = self.changelog.reachable(latest, minbhrev)
499 reachable.remove(latest)
499 reachable.remove(latest)
500 bheads = [b for b in bheads if b not in reachable]
500 bheads = [b for b in bheads if b not in reachable]
501 partial[branch] = bheads
501 partial[branch] = bheads
502
502
503 def lookup(self, key):
503 def lookup(self, key):
504 if isinstance(key, int):
504 if isinstance(key, int):
505 return self.changelog.node(key)
505 return self.changelog.node(key)
506 elif key == '.':
506 elif key == '.':
507 return self.dirstate.parents()[0]
507 return self.dirstate.parents()[0]
508 elif key == 'null':
508 elif key == 'null':
509 return nullid
509 return nullid
510 elif key == 'tip':
510 elif key == 'tip':
511 return self.changelog.tip()
511 return self.changelog.tip()
512 n = self.changelog._match(key)
512 n = self.changelog._match(key)
513 if n:
513 if n:
514 return n
514 return n
515 if key in self.tags():
515 if key in self.tags():
516 return self.tags()[key]
516 return self.tags()[key]
517 if key in self.branchtags():
517 if key in self.branchtags():
518 return self.branchtags()[key]
518 return self.branchtags()[key]
519 n = self.changelog._partialmatch(key)
519 n = self.changelog._partialmatch(key)
520 if n:
520 if n:
521 return n
521 return n
522
522
523 # can't find key, check if it might have come from damaged dirstate
523 # can't find key, check if it might have come from damaged dirstate
524 if key in self.dirstate.parents():
524 if key in self.dirstate.parents():
525 raise error.Abort(_("working directory has unknown parent '%s'!")
525 raise error.Abort(_("working directory has unknown parent '%s'!")
526 % short(key))
526 % short(key))
527 try:
527 try:
528 if len(key) == 20:
528 if len(key) == 20:
529 key = hex(key)
529 key = hex(key)
530 except:
530 except:
531 pass
531 pass
532 raise error.RepoLookupError(_("unknown revision '%s'") % key)
532 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533
533
534 def lookupbranch(self, key, remote=None):
534 def lookupbranch(self, key, remote=None):
535 repo = remote or self
535 repo = remote or self
536 if key in repo.branchmap():
536 if key in repo.branchmap():
537 return key
537 return key
538
538
539 repo = (remote and remote.local()) and remote or self
539 repo = (remote and remote.local()) and remote or self
540 return repo[key].branch()
540 return repo[key].branch()
541
541
542 def local(self):
542 def local(self):
543 return True
543 return True
544
544
545 def join(self, f):
545 def join(self, f):
546 return os.path.join(self.path, f)
546 return os.path.join(self.path, f)
547
547
548 def wjoin(self, f):
548 def wjoin(self, f):
549 return os.path.join(self.root, f)
549 return os.path.join(self.root, f)
550
550
551 def file(self, f):
551 def file(self, f):
552 if f[0] == '/':
552 if f[0] == '/':
553 f = f[1:]
553 f = f[1:]
554 return filelog.filelog(self.sopener, f)
554 return filelog.filelog(self.sopener, f)
555
555
556 def changectx(self, changeid):
556 def changectx(self, changeid):
557 return self[changeid]
557 return self[changeid]
558
558
559 def parents(self, changeid=None):
559 def parents(self, changeid=None):
560 '''get list of changectxs for parents of changeid'''
560 '''get list of changectxs for parents of changeid'''
561 return self[changeid].parents()
561 return self[changeid].parents()
562
562
563 def filectx(self, path, changeid=None, fileid=None):
563 def filectx(self, path, changeid=None, fileid=None):
564 """changeid can be a changeset revision, node, or tag.
564 """changeid can be a changeset revision, node, or tag.
565 fileid can be a file revision or node."""
565 fileid can be a file revision or node."""
566 return context.filectx(self, path, changeid, fileid)
566 return context.filectx(self, path, changeid, fileid)
567
567
568 def getcwd(self):
568 def getcwd(self):
569 return self.dirstate.getcwd()
569 return self.dirstate.getcwd()
570
570
571 def pathto(self, f, cwd=None):
571 def pathto(self, f, cwd=None):
572 return self.dirstate.pathto(f, cwd)
572 return self.dirstate.pathto(f, cwd)
573
573
574 def wfile(self, f, mode='r'):
574 def wfile(self, f, mode='r'):
575 return self.wopener(f, mode)
575 return self.wopener(f, mode)
576
576
577 def _link(self, f):
577 def _link(self, f):
578 return os.path.islink(self.wjoin(f))
578 return os.path.islink(self.wjoin(f))
579
579
580 def _loadfilter(self, filter):
580 def _loadfilter(self, filter):
581 if filter not in self.filterpats:
581 if filter not in self.filterpats:
582 l = []
582 l = []
583 for pat, cmd in self.ui.configitems(filter):
583 for pat, cmd in self.ui.configitems(filter):
584 if cmd == '!':
584 if cmd == '!':
585 continue
585 continue
586 mf = matchmod.match(self.root, '', [pat])
586 mf = matchmod.match(self.root, '', [pat])
587 fn = None
587 fn = None
588 params = cmd
588 params = cmd
589 for name, filterfn in self._datafilters.iteritems():
589 for name, filterfn in self._datafilters.iteritems():
590 if cmd.startswith(name):
590 if cmd.startswith(name):
591 fn = filterfn
591 fn = filterfn
592 params = cmd[len(name):].lstrip()
592 params = cmd[len(name):].lstrip()
593 break
593 break
594 if not fn:
594 if not fn:
595 fn = lambda s, c, **kwargs: util.filter(s, c)
595 fn = lambda s, c, **kwargs: util.filter(s, c)
596 # Wrap old filters not supporting keyword arguments
596 # Wrap old filters not supporting keyword arguments
597 if not inspect.getargspec(fn)[2]:
597 if not inspect.getargspec(fn)[2]:
598 oldfn = fn
598 oldfn = fn
599 fn = lambda s, c, **kwargs: oldfn(s, c)
599 fn = lambda s, c, **kwargs: oldfn(s, c)
600 l.append((mf, fn, params))
600 l.append((mf, fn, params))
601 self.filterpats[filter] = l
601 self.filterpats[filter] = l
602 return self.filterpats[filter]
602 return self.filterpats[filter]
603
603
604 def _filter(self, filterpats, filename, data):
604 def _filter(self, filterpats, filename, data):
605 for mf, fn, cmd in filterpats:
605 for mf, fn, cmd in filterpats:
606 if mf(filename):
606 if mf(filename):
607 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
607 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
608 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 break
609 break
610
610
611 return data
611 return data
612
612
613 @propertycache
613 @propertycache
614 def _encodefilterpats(self):
614 def _encodefilterpats(self):
615 return self._loadfilter('encode')
615 return self._loadfilter('encode')
616
616
617 @propertycache
617 @propertycache
618 def _decodefilterpats(self):
618 def _decodefilterpats(self):
619 return self._loadfilter('decode')
619 return self._loadfilter('decode')
620
620
621 def adddatafilter(self, name, filter):
621 def adddatafilter(self, name, filter):
622 self._datafilters[name] = filter
622 self._datafilters[name] = filter
623
623
624 def wread(self, filename):
624 def wread(self, filename):
625 if self._link(filename):
625 if self._link(filename):
626 data = os.readlink(self.wjoin(filename))
626 data = os.readlink(self.wjoin(filename))
627 else:
627 else:
628 data = self.wopener(filename, 'r').read()
628 data = self.wopener(filename, 'r').read()
629 return self._filter(self._encodefilterpats, filename, data)
629 return self._filter(self._encodefilterpats, filename, data)
630
630
631 def wwrite(self, filename, data, flags):
631 def wwrite(self, filename, data, flags):
632 data = self._filter(self._decodefilterpats, filename, data)
632 data = self._filter(self._decodefilterpats, filename, data)
633 if 'l' in flags:
633 if 'l' in flags:
634 self.wopener.symlink(data, filename)
634 self.wopener.symlink(data, filename)
635 else:
635 else:
636 self.wopener(filename, 'w').write(data)
636 self.wopener(filename, 'w').write(data)
637 if 'x' in flags:
637 if 'x' in flags:
638 util.set_flags(self.wjoin(filename), False, True)
638 util.set_flags(self.wjoin(filename), False, True)
639
639
640 def wwritedata(self, filename, data):
640 def wwritedata(self, filename, data):
641 return self._filter(self._decodefilterpats, filename, data)
641 return self._filter(self._decodefilterpats, filename, data)
642
642
643 def transaction(self, desc):
643 def transaction(self, desc):
644 tr = self._transref and self._transref() or None
644 tr = self._transref and self._transref() or None
645 if tr and tr.running():
645 if tr and tr.running():
646 return tr.nest()
646 return tr.nest()
647
647
648 # abort here if the journal already exists
648 # abort here if the journal already exists
649 if os.path.exists(self.sjoin("journal")):
649 if os.path.exists(self.sjoin("journal")):
650 raise error.RepoError(
650 raise error.RepoError(
651 _("abandoned transaction found - run hg recover"))
651 _("abandoned transaction found - run hg recover"))
652
652
653 # save dirstate for rollback
653 # save dirstate for rollback
654 try:
654 try:
655 ds = self.opener("dirstate").read()
655 ds = self.opener("dirstate").read()
656 except IOError:
656 except IOError:
657 ds = ""
657 ds = ""
658 self.opener("journal.dirstate", "w").write(ds)
658 self.opener("journal.dirstate", "w").write(ds)
659 self.opener("journal.branch", "w").write(
659 self.opener("journal.branch", "w").write(
660 encoding.fromlocal(self.dirstate.branch()))
660 encoding.fromlocal(self.dirstate.branch()))
661 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
661 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662
662
663 renames = [(self.sjoin("journal"), self.sjoin("undo")),
663 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 (self.join("journal.dirstate"), self.join("undo.dirstate")),
664 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 (self.join("journal.branch"), self.join("undo.branch")),
665 (self.join("journal.branch"), self.join("undo.branch")),
666 (self.join("journal.desc"), self.join("undo.desc"))]
666 (self.join("journal.desc"), self.join("undo.desc"))]
667 tr = transaction.transaction(self.ui.warn, self.sopener,
667 tr = transaction.transaction(self.ui.warn, self.sopener,
668 self.sjoin("journal"),
668 self.sjoin("journal"),
669 aftertrans(renames),
669 aftertrans(renames),
670 self.store.createmode)
670 self.store.createmode)
671 self._transref = weakref.ref(tr)
671 self._transref = weakref.ref(tr)
672 return tr
672 return tr
673
673
674 def recover(self):
674 def recover(self):
675 lock = self.lock()
675 lock = self.lock()
676 try:
676 try:
677 if os.path.exists(self.sjoin("journal")):
677 if os.path.exists(self.sjoin("journal")):
678 self.ui.status(_("rolling back interrupted transaction\n"))
678 self.ui.status(_("rolling back interrupted transaction\n"))
679 transaction.rollback(self.sopener, self.sjoin("journal"),
679 transaction.rollback(self.sopener, self.sjoin("journal"),
680 self.ui.warn)
680 self.ui.warn)
681 self.invalidate()
681 self.invalidate()
682 return True
682 return True
683 else:
683 else:
684 self.ui.warn(_("no interrupted transaction available\n"))
684 self.ui.warn(_("no interrupted transaction available\n"))
685 return False
685 return False
686 finally:
686 finally:
687 lock.release()
687 lock.release()
688
688
689 def rollback(self, dryrun=False):
689 def rollback(self, dryrun=False):
690 wlock = lock = None
690 wlock = lock = None
691 try:
691 try:
692 wlock = self.wlock()
692 wlock = self.wlock()
693 lock = self.lock()
693 lock = self.lock()
694 if os.path.exists(self.sjoin("undo")):
694 if os.path.exists(self.sjoin("undo")):
695 try:
695 try:
696 args = self.opener("undo.desc", "r").read().splitlines()
696 args = self.opener("undo.desc", "r").read().splitlines()
697 if len(args) >= 3 and self.ui.verbose:
697 if len(args) >= 3 and self.ui.verbose:
698 desc = _("rolling back to revision %s"
698 desc = _("rolling back to revision %s"
699 " (undo %s: %s)\n") % (
699 " (undo %s: %s)\n") % (
700 int(args[0]) - 1, args[1], args[2])
700 int(args[0]) - 1, args[1], args[2])
701 elif len(args) >= 2:
701 elif len(args) >= 2:
702 desc = _("rolling back to revision %s (undo %s)\n") % (
702 desc = _("rolling back to revision %s (undo %s)\n") % (
703 int(args[0]) - 1, args[1])
703 int(args[0]) - 1, args[1])
704 except IOError:
704 except IOError:
705 desc = _("rolling back unknown transaction\n")
705 desc = _("rolling back unknown transaction\n")
706 self.ui.status(desc)
706 self.ui.status(desc)
707 if dryrun:
707 if dryrun:
708 return
708 return
709 transaction.rollback(self.sopener, self.sjoin("undo"),
709 transaction.rollback(self.sopener, self.sjoin("undo"),
710 self.ui.warn)
710 self.ui.warn)
711 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
711 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 try:
712 try:
713 branch = self.opener("undo.branch").read()
713 branch = self.opener("undo.branch").read()
714 self.dirstate.setbranch(branch)
714 self.dirstate.setbranch(branch)
715 except IOError:
715 except IOError:
716 self.ui.warn(_("Named branch could not be reset, "
716 self.ui.warn(_("Named branch could not be reset, "
717 "current branch still is: %s\n")
717 "current branch still is: %s\n")
718 % self.dirstate.branch())
718 % self.dirstate.branch())
719 self.invalidate()
719 self.invalidate()
720 self.dirstate.invalidate()
720 self.dirstate.invalidate()
721 self.destroyed()
721 self.destroyed()
722 else:
722 else:
723 self.ui.warn(_("no rollback information available\n"))
723 self.ui.warn(_("no rollback information available\n"))
724 return 1
724 return 1
725 finally:
725 finally:
726 release(lock, wlock)
726 release(lock, wlock)
727
727
728 def invalidatecaches(self):
728 def invalidatecaches(self):
729 self._tags = None
729 self._tags = None
730 self._tagtypes = None
730 self._tagtypes = None
731 self.nodetagscache = None
731 self.nodetagscache = None
732 self._branchcache = None # in UTF-8
732 self._branchcache = None # in UTF-8
733 self._branchcachetip = None
733 self._branchcachetip = None
734
734
735 def invalidate(self):
735 def invalidate(self):
736 for a in "changelog manifest".split():
736 for a in "changelog manifest".split():
737 if a in self.__dict__:
737 if a in self.__dict__:
738 delattr(self, a)
738 delattr(self, a)
739 self.invalidatecaches()
739 self.invalidatecaches()
740
740
741 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
741 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 try:
742 try:
743 l = lock.lock(lockname, 0, releasefn, desc=desc)
743 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 except error.LockHeld, inst:
744 except error.LockHeld, inst:
745 if not wait:
745 if not wait:
746 raise
746 raise
747 self.ui.warn(_("waiting for lock on %s held by %r\n") %
747 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 (desc, inst.locker))
748 (desc, inst.locker))
749 # default to 600 seconds timeout
749 # default to 600 seconds timeout
750 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
750 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 releasefn, desc=desc)
751 releasefn, desc=desc)
752 if acquirefn:
752 if acquirefn:
753 acquirefn()
753 acquirefn()
754 return l
754 return l
755
755
756 def lock(self, wait=True):
756 def lock(self, wait=True):
757 '''Lock the repository store (.hg/store) and return a weak reference
757 '''Lock the repository store (.hg/store) and return a weak reference
758 to the lock. Use this before modifying the store (e.g. committing or
758 to the lock. Use this before modifying the store (e.g. committing or
759 stripping). If you are opening a transaction, get a lock as well.)'''
759 stripping). If you are opening a transaction, get a lock as well.)'''
760 l = self._lockref and self._lockref()
760 l = self._lockref and self._lockref()
761 if l is not None and l.held:
761 if l is not None and l.held:
762 l.lock()
762 l.lock()
763 return l
763 return l
764
764
765 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
765 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 _('repository %s') % self.origroot)
766 _('repository %s') % self.origroot)
767 self._lockref = weakref.ref(l)
767 self._lockref = weakref.ref(l)
768 return l
768 return l
769
769
770 def wlock(self, wait=True):
770 def wlock(self, wait=True):
771 '''Lock the non-store parts of the repository (everything under
771 '''Lock the non-store parts of the repository (everything under
772 .hg except .hg/store) and return a weak reference to the lock.
772 .hg except .hg/store) and return a weak reference to the lock.
773 Use this before modifying files in .hg.'''
773 Use this before modifying files in .hg.'''
774 l = self._wlockref and self._wlockref()
774 l = self._wlockref and self._wlockref()
775 if l is not None and l.held:
775 if l is not None and l.held:
776 l.lock()
776 l.lock()
777 return l
777 return l
778
778
779 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
779 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 self.dirstate.invalidate, _('working directory of %s') %
780 self.dirstate.invalidate, _('working directory of %s') %
781 self.origroot)
781 self.origroot)
782 self._wlockref = weakref.ref(l)
782 self._wlockref = weakref.ref(l)
783 return l
783 return l
784
784
785 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
785 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 """
786 """
787 commit an individual file as part of a larger transaction
787 commit an individual file as part of a larger transaction
788 """
788 """
789
789
790 fname = fctx.path()
790 fname = fctx.path()
791 text = fctx.data()
791 text = fctx.data()
792 flog = self.file(fname)
792 flog = self.file(fname)
793 fparent1 = manifest1.get(fname, nullid)
793 fparent1 = manifest1.get(fname, nullid)
794 fparent2 = fparent2o = manifest2.get(fname, nullid)
794 fparent2 = fparent2o = manifest2.get(fname, nullid)
795
795
796 meta = {}
796 meta = {}
797 copy = fctx.renamed()
797 copy = fctx.renamed()
798 if copy and copy[0] != fname:
798 if copy and copy[0] != fname:
799 # Mark the new revision of this file as a copy of another
799 # Mark the new revision of this file as a copy of another
800 # file. This copy data will effectively act as a parent
800 # file. This copy data will effectively act as a parent
801 # of this new revision. If this is a merge, the first
801 # of this new revision. If this is a merge, the first
802 # parent will be the nullid (meaning "look up the copy data")
802 # parent will be the nullid (meaning "look up the copy data")
803 # and the second one will be the other parent. For example:
803 # and the second one will be the other parent. For example:
804 #
804 #
805 # 0 --- 1 --- 3 rev1 changes file foo
805 # 0 --- 1 --- 3 rev1 changes file foo
806 # \ / rev2 renames foo to bar and changes it
806 # \ / rev2 renames foo to bar and changes it
807 # \- 2 -/ rev3 should have bar with all changes and
807 # \- 2 -/ rev3 should have bar with all changes and
808 # should record that bar descends from
808 # should record that bar descends from
809 # bar in rev2 and foo in rev1
809 # bar in rev2 and foo in rev1
810 #
810 #
811 # this allows this merge to succeed:
811 # this allows this merge to succeed:
812 #
812 #
813 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
813 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 # \ / merging rev3 and rev4 should use bar@rev2
814 # \ / merging rev3 and rev4 should use bar@rev2
815 # \- 2 --- 4 as the merge base
815 # \- 2 --- 4 as the merge base
816 #
816 #
817
817
818 cfname = copy[0]
818 cfname = copy[0]
819 crev = manifest1.get(cfname)
819 crev = manifest1.get(cfname)
820 newfparent = fparent2
820 newfparent = fparent2
821
821
822 if manifest2: # branch merge
822 if manifest2: # branch merge
823 if fparent2 == nullid or crev is None: # copied on remote side
823 if fparent2 == nullid or crev is None: # copied on remote side
824 if cfname in manifest2:
824 if cfname in manifest2:
825 crev = manifest2[cfname]
825 crev = manifest2[cfname]
826 newfparent = fparent1
826 newfparent = fparent1
827
827
828 # find source in nearest ancestor if we've lost track
828 # find source in nearest ancestor if we've lost track
829 if not crev:
829 if not crev:
830 self.ui.debug(" %s: searching for copy revision for %s\n" %
830 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 (fname, cfname))
831 (fname, cfname))
832 for ancestor in self[None].ancestors():
832 for ancestor in self[None].ancestors():
833 if cfname in ancestor:
833 if cfname in ancestor:
834 crev = ancestor[cfname].filenode()
834 crev = ancestor[cfname].filenode()
835 break
835 break
836
836
837 if crev:
837 if crev:
838 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
838 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 meta["copy"] = cfname
839 meta["copy"] = cfname
840 meta["copyrev"] = hex(crev)
840 meta["copyrev"] = hex(crev)
841 fparent1, fparent2 = nullid, newfparent
841 fparent1, fparent2 = nullid, newfparent
842 else:
842 else:
843 self.ui.warn(_("warning: can't find ancestor for '%s' "
843 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 "copied from '%s'!\n") % (fname, cfname))
844 "copied from '%s'!\n") % (fname, cfname))
845
845
846 elif fparent2 != nullid:
846 elif fparent2 != nullid:
847 # is one parent an ancestor of the other?
847 # is one parent an ancestor of the other?
848 fparentancestor = flog.ancestor(fparent1, fparent2)
848 fparentancestor = flog.ancestor(fparent1, fparent2)
849 if fparentancestor == fparent1:
849 if fparentancestor == fparent1:
850 fparent1, fparent2 = fparent2, nullid
850 fparent1, fparent2 = fparent2, nullid
851 elif fparentancestor == fparent2:
851 elif fparentancestor == fparent2:
852 fparent2 = nullid
852 fparent2 = nullid
853
853
854 # is the file changed?
854 # is the file changed?
855 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
855 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 changelist.append(fname)
856 changelist.append(fname)
857 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
857 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858
858
859 # are just the flags changed during merge?
859 # are just the flags changed during merge?
860 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
860 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 changelist.append(fname)
861 changelist.append(fname)
862
862
863 return fparent1
863 return fparent1
864
864
865 def commit(self, text="", user=None, date=None, match=None, force=False,
865 def commit(self, text="", user=None, date=None, match=None, force=False,
866 editor=False, extra={}):
866 editor=False, extra={}):
867 """Add a new revision to current repository.
867 """Add a new revision to current repository.
868
868
869 Revision information is gathered from the working directory,
869 Revision information is gathered from the working directory,
870 match can be used to filter the committed files. If editor is
870 match can be used to filter the committed files. If editor is
871 supplied, it is called to get a commit message.
871 supplied, it is called to get a commit message.
872 """
872 """
873
873
874 def fail(f, msg):
874 def fail(f, msg):
875 raise util.Abort('%s: %s' % (f, msg))
875 raise util.Abort('%s: %s' % (f, msg))
876
876
877 if not match:
877 if not match:
878 match = matchmod.always(self.root, '')
878 match = matchmod.always(self.root, '')
879
879
880 if not force:
880 if not force:
881 vdirs = []
881 vdirs = []
882 match.dir = vdirs.append
882 match.dir = vdirs.append
883 match.bad = fail
883 match.bad = fail
884
884
885 wlock = self.wlock()
885 wlock = self.wlock()
886 try:
886 try:
887 wctx = self[None]
887 wctx = self[None]
888 merge = len(wctx.parents()) > 1
888 merge = len(wctx.parents()) > 1
889
889
890 if (not force and merge and match and
890 if (not force and merge and match and
891 (match.files() or match.anypats())):
891 (match.files() or match.anypats())):
892 raise util.Abort(_('cannot partially commit a merge '
892 raise util.Abort(_('cannot partially commit a merge '
893 '(do not specify files or patterns)'))
893 '(do not specify files or patterns)'))
894
894
895 changes = self.status(match=match, clean=force)
895 changes = self.status(match=match, clean=force)
896 if force:
896 if force:
897 changes[0].extend(changes[6]) # mq may commit unchanged files
897 changes[0].extend(changes[6]) # mq may commit unchanged files
898
898
899 # check subrepos
899 # check subrepos
900 subs = []
900 subs = []
901 removedsubs = set()
901 removedsubs = set()
902 for p in wctx.parents():
902 for p in wctx.parents():
903 removedsubs.update(s for s in p.substate if match(s))
903 removedsubs.update(s for s in p.substate if match(s))
904 for s in wctx.substate:
904 for s in wctx.substate:
905 removedsubs.discard(s)
905 removedsubs.discard(s)
906 if match(s) and wctx.sub(s).dirty():
906 if match(s) and wctx.sub(s).dirty():
907 subs.append(s)
907 subs.append(s)
908 if (subs or removedsubs):
908 if (subs or removedsubs):
909 if (not match('.hgsub') and
909 if (not match('.hgsub') and
910 '.hgsub' in (wctx.modified() + wctx.added())):
910 '.hgsub' in (wctx.modified() + wctx.added())):
911 raise util.Abort(_("can't commit subrepos without .hgsub"))
911 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 if '.hgsubstate' not in changes[0]:
912 if '.hgsubstate' not in changes[0]:
913 changes[0].insert(0, '.hgsubstate')
913 changes[0].insert(0, '.hgsubstate')
914
914
915 # make sure all explicit patterns are matched
915 # make sure all explicit patterns are matched
916 if not force and match.files():
916 if not force and match.files():
917 matched = set(changes[0] + changes[1] + changes[2])
917 matched = set(changes[0] + changes[1] + changes[2])
918
918
919 for f in match.files():
919 for f in match.files():
920 if f == '.' or f in matched or f in wctx.substate:
920 if f == '.' or f in matched or f in wctx.substate:
921 continue
921 continue
922 if f in changes[3]: # missing
922 if f in changes[3]: # missing
923 fail(f, _('file not found!'))
923 fail(f, _('file not found!'))
924 if f in vdirs: # visited directory
924 if f in vdirs: # visited directory
925 d = f + '/'
925 d = f + '/'
926 for mf in matched:
926 for mf in matched:
927 if mf.startswith(d):
927 if mf.startswith(d):
928 break
928 break
929 else:
929 else:
930 fail(f, _("no match under directory!"))
930 fail(f, _("no match under directory!"))
931 elif f not in self.dirstate:
931 elif f not in self.dirstate:
932 fail(f, _("file not tracked!"))
932 fail(f, _("file not tracked!"))
933
933
934 if (not force and not extra.get("close") and not merge
934 if (not force and not extra.get("close") and not merge
935 and not (changes[0] or changes[1] or changes[2])
935 and not (changes[0] or changes[1] or changes[2])
936 and wctx.branch() == wctx.p1().branch()):
936 and wctx.branch() == wctx.p1().branch()):
937 return None
937 return None
938
938
939 ms = mergemod.mergestate(self)
939 ms = mergemod.mergestate(self)
940 for f in changes[0]:
940 for f in changes[0]:
941 if f in ms and ms[f] == 'u':
941 if f in ms and ms[f] == 'u':
942 raise util.Abort(_("unresolved merge conflicts "
942 raise util.Abort(_("unresolved merge conflicts "
943 "(see hg resolve)"))
943 "(see hg resolve)"))
944
944
945 cctx = context.workingctx(self, text, user, date, extra, changes)
945 cctx = context.workingctx(self, text, user, date, extra, changes)
946 if editor:
946 if editor:
947 cctx._text = editor(self, cctx, subs)
947 cctx._text = editor(self, cctx, subs)
948 edited = (text != cctx._text)
948 edited = (text != cctx._text)
949
949
950 # commit subs
950 # commit subs
951 if subs or removedsubs:
951 if subs or removedsubs:
952 pstate = subrepo.substate(self['.'])
953 state = wctx.substate.copy()
952 state = wctx.substate.copy()
954 for s in sorted(subs):
953 for s in sorted(subs):
955 sub = wctx.sub(s)
954 sub = wctx.sub(s)
956 self.ui.status(_('committing subrepository %s\n') %
955 self.ui.status(_('committing subrepository %s\n') %
957 subrepo.subrelpath(sub))
956 subrepo.subrelpath(sub))
958 sr = sub.commit(cctx._text, user, date)
957 sr = sub.commit(cctx._text, user, date)
959 state[s] = (state[s][0], sr)
958 state[s] = (state[s][0], sr)
960
959 subrepo.writestate(self, state)
961 changed = False
962 if len(pstate) != len(state):
963 changed = True
964 if not changed:
965 for newstate in state:
966 if state[newstate][1] != pstate[newstate]:
967 changed = True
968 if changed:
969 subrepo.writestate(self, state)
970 elif (changes[0] == ['.hgsubstate'] and changes[1] == [] and
971 changes[2] == []):
972 return None
973
960
974 # Save commit message in case this transaction gets rolled back
961 # Save commit message in case this transaction gets rolled back
975 # (e.g. by a pretxncommit hook). Leave the content alone on
962 # (e.g. by a pretxncommit hook). Leave the content alone on
976 # the assumption that the user will use the same editor again.
963 # the assumption that the user will use the same editor again.
977 msgfile = self.opener('last-message.txt', 'wb')
964 msgfile = self.opener('last-message.txt', 'wb')
978 msgfile.write(cctx._text)
965 msgfile.write(cctx._text)
979 msgfile.close()
966 msgfile.close()
980
967
981 p1, p2 = self.dirstate.parents()
968 p1, p2 = self.dirstate.parents()
982 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
969 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
983 try:
970 try:
984 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
971 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
985 ret = self.commitctx(cctx, True)
972 ret = self.commitctx(cctx, True)
986 except:
973 except:
987 if edited:
974 if edited:
988 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
975 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
989 self.ui.write(
976 self.ui.write(
990 _('note: commit message saved in %s\n') % msgfn)
977 _('note: commit message saved in %s\n') % msgfn)
991 raise
978 raise
992
979
993 # update dirstate and mergestate
980 # update dirstate and mergestate
994 for f in changes[0] + changes[1]:
981 for f in changes[0] + changes[1]:
995 self.dirstate.normal(f)
982 self.dirstate.normal(f)
996 for f in changes[2]:
983 for f in changes[2]:
997 self.dirstate.forget(f)
984 self.dirstate.forget(f)
998 self.dirstate.setparents(ret)
985 self.dirstate.setparents(ret)
999 ms.reset()
986 ms.reset()
1000 finally:
987 finally:
1001 wlock.release()
988 wlock.release()
1002
989
1003 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
990 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1004 return ret
991 return ret
1005
992
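# [editor's note: illustrative sketch, not part of localrepo.py]
# A minimal example of driving the commit() method defined above from a
# script or extension. The repository path is a hypothetical placeholder;
# commit() returns the new changeset node, or None when nothing changed.
from mercurial import ui as uimod, hg

u = uimod.ui()
repo = hg.repository(u, '/path/to/repo')
node = repo.commit(text='example commit',
                   user='Example User <user@example.com>')
if node is None:
    u.status('nothing changed\n')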
1006 def commitctx(self, ctx, error=False):
993 def commitctx(self, ctx, error=False):
1007 """Add a new revision to current repository.
994 """Add a new revision to current repository.
1008 Revision information is passed via the context argument.
995 Revision information is passed via the context argument.
1009 """
996 """
1010
997
1011 tr = lock = None
998 tr = lock = None
1012 removed = list(ctx.removed())
999 removed = list(ctx.removed())
1013 p1, p2 = ctx.p1(), ctx.p2()
1000 p1, p2 = ctx.p1(), ctx.p2()
1014 m1 = p1.manifest().copy()
1001 m1 = p1.manifest().copy()
1015 m2 = p2.manifest()
1002 m2 = p2.manifest()
1016 user = ctx.user()
1003 user = ctx.user()
1017
1004
1018 lock = self.lock()
1005 lock = self.lock()
1019 try:
1006 try:
1020 tr = self.transaction("commit")
1007 tr = self.transaction("commit")
1021 trp = weakref.proxy(tr)
1008 trp = weakref.proxy(tr)
1022
1009
1023 # check in files
1010 # check in files
1024 new = {}
1011 new = {}
1025 changed = []
1012 changed = []
1026 linkrev = len(self)
1013 linkrev = len(self)
1027 for f in sorted(ctx.modified() + ctx.added()):
1014 for f in sorted(ctx.modified() + ctx.added()):
1028 self.ui.note(f + "\n")
1015 self.ui.note(f + "\n")
1029 try:
1016 try:
1030 fctx = ctx[f]
1017 fctx = ctx[f]
1031 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1018 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1032 changed)
1019 changed)
1033 m1.set(f, fctx.flags())
1020 m1.set(f, fctx.flags())
1034 except OSError, inst:
1021 except OSError, inst:
1035 self.ui.warn(_("trouble committing %s!\n") % f)
1022 self.ui.warn(_("trouble committing %s!\n") % f)
1036 raise
1023 raise
1037 except IOError, inst:
1024 except IOError, inst:
1038 errcode = getattr(inst, 'errno', errno.ENOENT)
1025 errcode = getattr(inst, 'errno', errno.ENOENT)
1039 if error or errcode and errcode != errno.ENOENT:
1026 if error or errcode and errcode != errno.ENOENT:
1040 self.ui.warn(_("trouble committing %s!\n") % f)
1027 self.ui.warn(_("trouble committing %s!\n") % f)
1041 raise
1028 raise
1042 else:
1029 else:
1043 removed.append(f)
1030 removed.append(f)
1044
1031
1045 # update manifest
1032 # update manifest
1046 m1.update(new)
1033 m1.update(new)
1047 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1034 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1048 drop = [f for f in removed if f in m1]
1035 drop = [f for f in removed if f in m1]
1049 for f in drop:
1036 for f in drop:
1050 del m1[f]
1037 del m1[f]
1051 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1038 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1052 p2.manifestnode(), (new, drop))
1039 p2.manifestnode(), (new, drop))
1053
1040
1054 # update changelog
1041 # update changelog
1055 self.changelog.delayupdate()
1042 self.changelog.delayupdate()
1056 n = self.changelog.add(mn, changed + removed, ctx.description(),
1043 n = self.changelog.add(mn, changed + removed, ctx.description(),
1057 trp, p1.node(), p2.node(),
1044 trp, p1.node(), p2.node(),
1058 user, ctx.date(), ctx.extra().copy())
1045 user, ctx.date(), ctx.extra().copy())
1059 p = lambda: self.changelog.writepending() and self.root or ""
1046 p = lambda: self.changelog.writepending() and self.root or ""
1060 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1061 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1062 parent2=xp2, pending=p)
1049 parent2=xp2, pending=p)
1063 self.changelog.finalize(trp)
1050 self.changelog.finalize(trp)
1064 tr.close()
1051 tr.close()
1065
1052
1066 if self._branchcache:
1053 if self._branchcache:
1067 self.updatebranchcache()
1054 self.updatebranchcache()
1068 return n
1055 return n
1069 finally:
1056 finally:
1070 if tr:
1057 if tr:
1071 tr.release()
1058 tr.release()
1072 lock.release()
1059 lock.release()
1073
1060
1074 def destroyed(self):
1061 def destroyed(self):
1075 '''Inform the repository that nodes have been destroyed.
1062 '''Inform the repository that nodes have been destroyed.
1076 Intended for use by strip and rollback, so there's a common
1063 Intended for use by strip and rollback, so there's a common
1077 place for anything that has to be done after destroying history.'''
1064 place for anything that has to be done after destroying history.'''
1078 # XXX it might be nice if we could take the list of destroyed
1065 # XXX it might be nice if we could take the list of destroyed
1079 # nodes, but I don't see an easy way for rollback() to do that
1066 # nodes, but I don't see an easy way for rollback() to do that
1080
1067
1081 # Ensure the persistent tag cache is updated. Doing it now
1068 # Ensure the persistent tag cache is updated. Doing it now
1082 # means that the tag cache only has to worry about destroyed
1069 # means that the tag cache only has to worry about destroyed
1083 # heads immediately after a strip/rollback. That in turn
1070 # heads immediately after a strip/rollback. That in turn
1084 # guarantees that "cachetip == currenttip" (comparing both rev
1071 # guarantees that "cachetip == currenttip" (comparing both rev
1085 # and node) always means no nodes have been added or destroyed.
1072 # and node) always means no nodes have been added or destroyed.
1086
1073
1087 # XXX this is suboptimal when qrefresh'ing: we strip the current
1074 # XXX this is suboptimal when qrefresh'ing: we strip the current
1088 # head, refresh the tag cache, then immediately add a new head.
1075 # head, refresh the tag cache, then immediately add a new head.
1089 # But I think doing it this way is necessary for the "instant
1076 # But I think doing it this way is necessary for the "instant
1090 # tag cache retrieval" case to work.
1077 # tag cache retrieval" case to work.
1091 self.invalidatecaches()
1078 self.invalidatecaches()
1092
1079
1093 def walk(self, match, node=None):
1080 def walk(self, match, node=None):
1094 '''
1081 '''
1095 walk recursively through the directory tree or a given
1082 walk recursively through the directory tree or a given
1096 changeset, finding all files matched by the match
1083 changeset, finding all files matched by the match
1097 function
1084 function
1098 '''
1085 '''
1099 return self[node].walk(match)
1086 return self[node].walk(match)
1100
1087
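# [editor's note: illustrative sketch, not part of localrepo.py]
# walk() simply delegates to the context's walk(). A hedged example listing
# every tracked file in the working directory, using the same
# matchmod.always() helper this module uses; the path is a placeholder.
from mercurial import ui as uimod, hg, match as matchmod

repo = hg.repository(uimod.ui(), '/path/to/repo')
m = matchmod.always(repo.root, '')
for f in repo.walk(m):          # node=None walks the working directory
    print f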
1101 def status(self, node1='.', node2=None, match=None,
1088 def status(self, node1='.', node2=None, match=None,
1102 ignored=False, clean=False, unknown=False,
1089 ignored=False, clean=False, unknown=False,
1103 listsubrepos=False):
1090 listsubrepos=False):
1104 """return status of files between two nodes or node and working directory
1091 """return status of files between two nodes or node and working directory
1105
1092
1106 If node1 is None, use the first dirstate parent instead.
1093 If node1 is None, use the first dirstate parent instead.
1107 If node2 is None, compare node1 with working directory.
1094 If node2 is None, compare node1 with working directory.
1108 """
1095 """
1109
1096
1110 def mfmatches(ctx):
1097 def mfmatches(ctx):
1111 mf = ctx.manifest().copy()
1098 mf = ctx.manifest().copy()
1112 for fn in mf.keys():
1099 for fn in mf.keys():
1113 if not match(fn):
1100 if not match(fn):
1114 del mf[fn]
1101 del mf[fn]
1115 return mf
1102 return mf
1116
1103
1117 if isinstance(node1, context.changectx):
1104 if isinstance(node1, context.changectx):
1118 ctx1 = node1
1105 ctx1 = node1
1119 else:
1106 else:
1120 ctx1 = self[node1]
1107 ctx1 = self[node1]
1121 if isinstance(node2, context.changectx):
1108 if isinstance(node2, context.changectx):
1122 ctx2 = node2
1109 ctx2 = node2
1123 else:
1110 else:
1124 ctx2 = self[node2]
1111 ctx2 = self[node2]
1125
1112
1126 working = ctx2.rev() is None
1113 working = ctx2.rev() is None
1127 parentworking = working and ctx1 == self['.']
1114 parentworking = working and ctx1 == self['.']
1128 match = match or matchmod.always(self.root, self.getcwd())
1115 match = match or matchmod.always(self.root, self.getcwd())
1129 listignored, listclean, listunknown = ignored, clean, unknown
1116 listignored, listclean, listunknown = ignored, clean, unknown
1130
1117
1131 # load earliest manifest first for caching reasons
1118 # load earliest manifest first for caching reasons
1132 if not working and ctx2.rev() < ctx1.rev():
1119 if not working and ctx2.rev() < ctx1.rev():
1133 ctx2.manifest()
1120 ctx2.manifest()
1134
1121
1135 if not parentworking:
1122 if not parentworking:
1136 def bad(f, msg):
1123 def bad(f, msg):
1137 if f not in ctx1:
1124 if f not in ctx1:
1138 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1125 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1139 match.bad = bad
1126 match.bad = bad
1140
1127
1141 if working: # we need to scan the working dir
1128 if working: # we need to scan the working dir
1142 subrepos = []
1129 subrepos = []
1143 if '.hgsub' in self.dirstate:
1130 if '.hgsub' in self.dirstate:
1144 subrepos = ctx1.substate.keys()
1131 subrepos = ctx1.substate.keys()
1145 s = self.dirstate.status(match, subrepos, listignored,
1132 s = self.dirstate.status(match, subrepos, listignored,
1146 listclean, listunknown)
1133 listclean, listunknown)
1147 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1134 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1148
1135
1149 # check for any possibly clean files
1136 # check for any possibly clean files
1150 if parentworking and cmp:
1137 if parentworking and cmp:
1151 fixup = []
1138 fixup = []
1152 # do a full compare of any files that might have changed
1139 # do a full compare of any files that might have changed
1153 for f in sorted(cmp):
1140 for f in sorted(cmp):
1154 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1141 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1155 or ctx1[f].cmp(ctx2[f])):
1142 or ctx1[f].cmp(ctx2[f])):
1156 modified.append(f)
1143 modified.append(f)
1157 else:
1144 else:
1158 fixup.append(f)
1145 fixup.append(f)
1159
1146
1160 # update dirstate for files that are actually clean
1147 # update dirstate for files that are actually clean
1161 if fixup:
1148 if fixup:
1162 if listclean:
1149 if listclean:
1163 clean += fixup
1150 clean += fixup
1164
1151
1165 try:
1152 try:
1166 # updating the dirstate is optional
1153 # updating the dirstate is optional
1167 # so we don't wait on the lock
1154 # so we don't wait on the lock
1168 wlock = self.wlock(False)
1155 wlock = self.wlock(False)
1169 try:
1156 try:
1170 for f in fixup:
1157 for f in fixup:
1171 self.dirstate.normal(f)
1158 self.dirstate.normal(f)
1172 finally:
1159 finally:
1173 wlock.release()
1160 wlock.release()
1174 except error.LockError:
1161 except error.LockError:
1175 pass
1162 pass
1176
1163
1177 if not parentworking:
1164 if not parentworking:
1178 mf1 = mfmatches(ctx1)
1165 mf1 = mfmatches(ctx1)
1179 if working:
1166 if working:
1180 # we are comparing working dir against non-parent
1167 # we are comparing working dir against non-parent
1181 # generate a pseudo-manifest for the working dir
1168 # generate a pseudo-manifest for the working dir
1182 mf2 = mfmatches(self['.'])
1169 mf2 = mfmatches(self['.'])
1183 for f in cmp + modified + added:
1170 for f in cmp + modified + added:
1184 mf2[f] = None
1171 mf2[f] = None
1185 mf2.set(f, ctx2.flags(f))
1172 mf2.set(f, ctx2.flags(f))
1186 for f in removed:
1173 for f in removed:
1187 if f in mf2:
1174 if f in mf2:
1188 del mf2[f]
1175 del mf2[f]
1189 else:
1176 else:
1190 # we are comparing two revisions
1177 # we are comparing two revisions
1191 deleted, unknown, ignored = [], [], []
1178 deleted, unknown, ignored = [], [], []
1192 mf2 = mfmatches(ctx2)
1179 mf2 = mfmatches(ctx2)
1193
1180
1194 modified, added, clean = [], [], []
1181 modified, added, clean = [], [], []
1195 for fn in mf2:
1182 for fn in mf2:
1196 if fn in mf1:
1183 if fn in mf1:
1197 if (mf1.flags(fn) != mf2.flags(fn) or
1184 if (mf1.flags(fn) != mf2.flags(fn) or
1198 (mf1[fn] != mf2[fn] and
1185 (mf1[fn] != mf2[fn] and
1199 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1186 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1200 modified.append(fn)
1187 modified.append(fn)
1201 elif listclean:
1188 elif listclean:
1202 clean.append(fn)
1189 clean.append(fn)
1203 del mf1[fn]
1190 del mf1[fn]
1204 else:
1191 else:
1205 added.append(fn)
1192 added.append(fn)
1206 removed = mf1.keys()
1193 removed = mf1.keys()
1207
1194
1208 r = modified, added, removed, deleted, unknown, ignored, clean
1195 r = modified, added, removed, deleted, unknown, ignored, clean
1209
1196
1210 if listsubrepos:
1197 if listsubrepos:
1211 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1198 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1212 if working:
1199 if working:
1213 rev2 = None
1200 rev2 = None
1214 else:
1201 else:
1215 rev2 = ctx2.substate[subpath][1]
1202 rev2 = ctx2.substate[subpath][1]
1216 try:
1203 try:
1217 submatch = matchmod.narrowmatcher(subpath, match)
1204 submatch = matchmod.narrowmatcher(subpath, match)
1218 s = sub.status(rev2, match=submatch, ignored=listignored,
1205 s = sub.status(rev2, match=submatch, ignored=listignored,
1219 clean=listclean, unknown=listunknown,
1206 clean=listclean, unknown=listunknown,
1220 listsubrepos=True)
1207 listsubrepos=True)
1221 for rfiles, sfiles in zip(r, s):
1208 for rfiles, sfiles in zip(r, s):
1222 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1209 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1223 except error.LookupError:
1210 except error.LookupError:
1224 self.ui.status(_("skipping missing subrepository: %s\n")
1211 self.ui.status(_("skipping missing subrepository: %s\n")
1225 % subpath)
1212 % subpath)
1226
1213
1227 [l.sort() for l in r]
1214 [l.sort() for l in r]
1228 return r
1215 return r
1229
1216
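# [editor's note: illustrative sketch, not part of localrepo.py]
# status() returns the seven lists assembled above, in this order. A small
# example of unpacking them for a working-directory status against '.'; the
# repository path is a placeholder.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '/path/to/repo')
st = repo.status(unknown=True, ignored=True, clean=True)
modified, added, removed, deleted, unknown, ignored, clean = st
print '%d modified, %d unknown, %d clean' % (
    len(modified), len(unknown), len(clean))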
1230 def heads(self, start=None):
1217 def heads(self, start=None):
1231 heads = self.changelog.heads(start)
1218 heads = self.changelog.heads(start)
1232 # sort the output in rev descending order
1219 # sort the output in rev descending order
1233 return sorted(heads, key=self.changelog.rev, reverse=True)
1220 return sorted(heads, key=self.changelog.rev, reverse=True)
1234
1221
1235 def branchheads(self, branch=None, start=None, closed=False):
1222 def branchheads(self, branch=None, start=None, closed=False):
1236 '''return a (possibly filtered) list of heads for the given branch
1223 '''return a (possibly filtered) list of heads for the given branch
1237
1224
1238 Heads are returned in topological order, from newest to oldest.
1225 Heads are returned in topological order, from newest to oldest.
1239 If branch is None, use the dirstate branch.
1226 If branch is None, use the dirstate branch.
1240 If start is not None, return only heads reachable from start.
1227 If start is not None, return only heads reachable from start.
1241 If closed is True, return heads that are marked as closed as well.
1228 If closed is True, return heads that are marked as closed as well.
1242 '''
1229 '''
1243 if branch is None:
1230 if branch is None:
1244 branch = self[None].branch()
1231 branch = self[None].branch()
1245 branches = self.branchmap()
1232 branches = self.branchmap()
1246 if branch not in branches:
1233 if branch not in branches:
1247 return []
1234 return []
1248 # the cache returns heads ordered lowest to highest
1235 # the cache returns heads ordered lowest to highest
1249 bheads = list(reversed(branches[branch]))
1236 bheads = list(reversed(branches[branch]))
1250 if start is not None:
1237 if start is not None:
1251 # filter out the heads that cannot be reached from startrev
1238 # filter out the heads that cannot be reached from startrev
1252 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1239 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1253 bheads = [h for h in bheads if h in fbheads]
1240 bheads = [h for h in bheads if h in fbheads]
1254 if not closed:
1241 if not closed:
1255 bheads = [h for h in bheads if
1242 bheads = [h for h in bheads if
1256 ('close' not in self.changelog.read(h)[5])]
1243 ('close' not in self.changelog.read(h)[5])]
1257 return bheads
1244 return bheads
1258
1245
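# [editor's note: illustrative sketch, not part of localrepo.py]
# branchheads() as documented above, including closed heads; branch name and
# repository path are assumptions for the example.
from mercurial import ui as uimod, hg
from mercurial.node import short

repo = hg.repository(uimod.ui(), '/path/to/repo')
for h in repo.branchheads('default', closed=True):
    print short(h)              # newest-to-oldest topological order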
1259 def branches(self, nodes):
1246 def branches(self, nodes):
1260 if not nodes:
1247 if not nodes:
1261 nodes = [self.changelog.tip()]
1248 nodes = [self.changelog.tip()]
1262 b = []
1249 b = []
1263 for n in nodes:
1250 for n in nodes:
1264 t = n
1251 t = n
1265 while 1:
1252 while 1:
1266 p = self.changelog.parents(n)
1253 p = self.changelog.parents(n)
1267 if p[1] != nullid or p[0] == nullid:
1254 if p[1] != nullid or p[0] == nullid:
1268 b.append((t, n, p[0], p[1]))
1255 b.append((t, n, p[0], p[1]))
1269 break
1256 break
1270 n = p[0]
1257 n = p[0]
1271 return b
1258 return b
1272
1259
1273 def between(self, pairs):
1260 def between(self, pairs):
1274 r = []
1261 r = []
1275
1262
1276 for top, bottom in pairs:
1263 for top, bottom in pairs:
1277 n, l, i = top, [], 0
1264 n, l, i = top, [], 0
1278 f = 1
1265 f = 1
1279
1266
1280 while n != bottom and n != nullid:
1267 while n != bottom and n != nullid:
1281 p = self.changelog.parents(n)[0]
1268 p = self.changelog.parents(n)[0]
1282 if i == f:
1269 if i == f:
1283 l.append(n)
1270 l.append(n)
1284 f = f * 2
1271 f = f * 2
1285 n = p
1272 n = p
1286 i += 1
1273 i += 1
1287
1274
1288 r.append(l)
1275 r.append(l)
1289
1276
1290 return r
1277 return r
1291
1278
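# [editor's note: illustrative sketch, not part of localrepo.py]
# between() walks first parents from each 'top' toward 'bottom' and keeps
# only the ancestors at distances 1, 2, 4, 8, ... from top (i == f, with f
# doubling), yielding an exponentially spaced sample. A toy version of the
# same sampling over plain integer offsets:
def sample_offsets(length):
    l, i, f = [], 0, 1
    while i < length:
        if i == f:
            l.append(i)
            f *= 2
        i += 1
    return l

print sample_offsets(20)        # [1, 2, 4, 8, 16]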
1292 def pull(self, remote, heads=None, force=False):
1279 def pull(self, remote, heads=None, force=False):
1293 lock = self.lock()
1280 lock = self.lock()
1294 try:
1281 try:
1295 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1282 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1296 force=force)
1283 force=force)
1297 common, fetch, rheads = tmp
1284 common, fetch, rheads = tmp
1298 if not fetch:
1285 if not fetch:
1299 self.ui.status(_("no changes found\n"))
1286 self.ui.status(_("no changes found\n"))
1300 return 0
1287 return 0
1301
1288
1302 if heads is None and fetch == [nullid]:
1289 if heads is None and fetch == [nullid]:
1303 self.ui.status(_("requesting all changes\n"))
1290 self.ui.status(_("requesting all changes\n"))
1304 elif heads is None and remote.capable('changegroupsubset'):
1291 elif heads is None and remote.capable('changegroupsubset'):
1305 # issue1320, avoid a race if remote changed after discovery
1292 # issue1320, avoid a race if remote changed after discovery
1306 heads = rheads
1293 heads = rheads
1307
1294
1308 if heads is None:
1295 if heads is None:
1309 cg = remote.changegroup(fetch, 'pull')
1296 cg = remote.changegroup(fetch, 'pull')
1310 else:
1297 else:
1311 if not remote.capable('changegroupsubset'):
1298 if not remote.capable('changegroupsubset'):
1312 raise util.Abort(_("partial pull cannot be done because "
1299 raise util.Abort(_("partial pull cannot be done because "
1313 "other repository doesn't support "
1300 "other repository doesn't support "
1314 "changegroupsubset."))
1301 "changegroupsubset."))
1315 cg = remote.changegroupsubset(fetch, heads, 'pull')
1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1316 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1303 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1317 finally:
1304 finally:
1318 lock.release()
1305 lock.release()
1319
1306
1320 def push(self, remote, force=False, revs=None, newbranch=False):
1307 def push(self, remote, force=False, revs=None, newbranch=False):
1321 '''Push outgoing changesets (limited by revs) from the current
1308 '''Push outgoing changesets (limited by revs) from the current
1322 repository to remote. Return an integer:
1309 repository to remote. Return an integer:
1323 - 0 means HTTP error *or* nothing to push
1310 - 0 means HTTP error *or* nothing to push
1324 - 1 means we pushed and remote head count is unchanged *or*
1311 - 1 means we pushed and remote head count is unchanged *or*
1325 we have outgoing changesets but refused to push
1312 we have outgoing changesets but refused to push
1326 - other values as described by addchangegroup()
1313 - other values as described by addchangegroup()
1327 '''
1314 '''
1328 # there are two ways to push to remote repo:
1315 # there are two ways to push to remote repo:
1329 #
1316 #
1330 # addchangegroup assumes local user can lock remote
1317 # addchangegroup assumes local user can lock remote
1331 # repo (local filesystem, old ssh servers).
1318 # repo (local filesystem, old ssh servers).
1332 #
1319 #
1333 # unbundle assumes local user cannot lock remote repo (new ssh
1320 # unbundle assumes local user cannot lock remote repo (new ssh
1334 # servers, http servers).
1321 # servers, http servers).
1335
1322
1336 lock = None
1323 lock = None
1337 unbundle = remote.capable('unbundle')
1324 unbundle = remote.capable('unbundle')
1338 if not unbundle:
1325 if not unbundle:
1339 lock = remote.lock()
1326 lock = remote.lock()
1340 try:
1327 try:
1341 ret = discovery.prepush(self, remote, force, revs, newbranch)
1328 ret = discovery.prepush(self, remote, force, revs, newbranch)
1342 if ret[0] is None:
1329 if ret[0] is None:
1343 # and here we return 0 for "nothing to push" or 1 for
1330 # and here we return 0 for "nothing to push" or 1 for
1344 # "something to push but I refuse"
1331 # "something to push but I refuse"
1345 return ret[1]
1332 return ret[1]
1346
1333
1347 cg, remote_heads = ret
1334 cg, remote_heads = ret
1348 if unbundle:
1335 if unbundle:
1349 # local repo finds heads on server, finds out what revs it must
1336 # local repo finds heads on server, finds out what revs it must
1350 # push. Once revs are transferred, if the server finds it has
1337 # push. Once revs are transferred, if the server finds it has
1351 # different heads (someone else won the commit/push race), it
1338 # different heads (someone else won the commit/push race), it
1352 # aborts.
1339 # aborts.
1353 if force:
1340 if force:
1354 remote_heads = ['force']
1341 remote_heads = ['force']
1355 # ssh: return remote's addchangegroup()
1342 # ssh: return remote's addchangegroup()
1356 # http: return remote's addchangegroup() or 0 for error
1343 # http: return remote's addchangegroup() or 0 for error
1357 return remote.unbundle(cg, remote_heads, 'push')
1344 return remote.unbundle(cg, remote_heads, 'push')
1358 else:
1345 else:
1359 # we return an integer indicating remote head count change
1346 # we return an integer indicating remote head count change
1360 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1347 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1361 finally:
1348 finally:
1362 if lock is not None:
1349 if lock is not None:
1363 lock.release()
1350 lock.release()
1364
1351
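# [editor's note: illustrative sketch, not part of localrepo.py]
# Interpreting push()'s integer result as documented above: 0 is an HTTP
# error or nothing to push, 1 means the remote head count is unchanged (or
# the push was refused), other values follow addchangegroup(). The paths are
# placeholders; any peer repository object works as 'other'.
from mercurial import ui as uimod, hg

u = uimod.ui()
repo = hg.repository(u, '/path/to/local')
other = hg.repository(u, '/path/to/other')
ret = repo.push(other)
if ret == 0:
    u.warn('push failed or there was nothing to push\n')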
1365 def changegroupinfo(self, nodes, source):
1352 def changegroupinfo(self, nodes, source):
1366 if self.ui.verbose or source == 'bundle':
1353 if self.ui.verbose or source == 'bundle':
1367 self.ui.status(_("%d changesets found\n") % len(nodes))
1354 self.ui.status(_("%d changesets found\n") % len(nodes))
1368 if self.ui.debugflag:
1355 if self.ui.debugflag:
1369 self.ui.debug("list of changesets:\n")
1356 self.ui.debug("list of changesets:\n")
1370 for node in nodes:
1357 for node in nodes:
1371 self.ui.debug("%s\n" % hex(node))
1358 self.ui.debug("%s\n" % hex(node))
1372
1359
1373 def changegroupsubset(self, bases, heads, source, extranodes=None):
1360 def changegroupsubset(self, bases, heads, source, extranodes=None):
1374 """Compute a changegroup consisting of all the nodes that are
1361 """Compute a changegroup consisting of all the nodes that are
1375 descendants of any of the bases and ancestors of any of the heads.
1362 descendants of any of the bases and ancestors of any of the heads.
1376 Return a chunkbuffer object whose read() method will return
1363 Return a chunkbuffer object whose read() method will return
1377 successive changegroup chunks.
1364 successive changegroup chunks.
1378
1365
1379 It is fairly complex as determining which filenodes and which
1366 It is fairly complex as determining which filenodes and which
1380 manifest nodes need to be included for the changeset to be complete
1367 manifest nodes need to be included for the changeset to be complete
1381 is non-trivial.
1368 is non-trivial.
1382
1369
1383 Another wrinkle is doing the reverse, figuring out which changeset in
1370 Another wrinkle is doing the reverse, figuring out which changeset in
1384 the changegroup a particular filenode or manifestnode belongs to.
1371 the changegroup a particular filenode or manifestnode belongs to.
1385
1372
1386 The caller can specify some nodes that must be included in the
1373 The caller can specify some nodes that must be included in the
1387 changegroup using the extranodes argument. It should be a dict
1374 changegroup using the extranodes argument. It should be a dict
1388 where the keys are the filenames (or 1 for the manifest), and the
1375 where the keys are the filenames (or 1 for the manifest), and the
1389 values are lists of (node, linknode) tuples, where node is a wanted
1376 values are lists of (node, linknode) tuples, where node is a wanted
1390 node and linknode is the changelog node that should be transmitted as
1377 node and linknode is the changelog node that should be transmitted as
1391 the linkrev.
1378 the linkrev.
1392 """
1379 """
1393
1380
1394 # Set up some initial variables
1381 # Set up some initial variables
1395 # Make it easy to refer to self.changelog
1382 # Make it easy to refer to self.changelog
1396 cl = self.changelog
1383 cl = self.changelog
1397 # Compute the list of changesets in this changegroup.
1384 # Compute the list of changesets in this changegroup.
1398 # Some bases may turn out to be superfluous, and some heads may be
1385 # Some bases may turn out to be superfluous, and some heads may be
1399 # too. nodesbetween will return the minimal set of bases and heads
1386 # too. nodesbetween will return the minimal set of bases and heads
1400 # necessary to re-create the changegroup.
1387 # necessary to re-create the changegroup.
1401 if not bases:
1388 if not bases:
1402 bases = [nullid]
1389 bases = [nullid]
1403 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1390 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1404
1391
1405 if extranodes is None:
1392 if extranodes is None:
1406 # can we go through the fast path ?
1393 # can we go through the fast path ?
1407 heads.sort()
1394 heads.sort()
1408 allheads = self.heads()
1395 allheads = self.heads()
1409 allheads.sort()
1396 allheads.sort()
1410 if heads == allheads:
1397 if heads == allheads:
1411 return self._changegroup(msng_cl_lst, source)
1398 return self._changegroup(msng_cl_lst, source)
1412
1399
1413 # slow path
1400 # slow path
1414 self.hook('preoutgoing', throw=True, source=source)
1401 self.hook('preoutgoing', throw=True, source=source)
1415
1402
1416 self.changegroupinfo(msng_cl_lst, source)
1403 self.changegroupinfo(msng_cl_lst, source)
1417
1404
1418 # We assume that all ancestors of bases are known
1405 # We assume that all ancestors of bases are known
1419 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1406 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1420
1407
1421 # Make it easy to refer to self.manifest
1408 # Make it easy to refer to self.manifest
1422 mnfst = self.manifest
1409 mnfst = self.manifest
1423 # We don't know which manifests are missing yet
1410 # We don't know which manifests are missing yet
1424 msng_mnfst_set = {}
1411 msng_mnfst_set = {}
1425 # Nor do we know which filenodes are missing.
1412 # Nor do we know which filenodes are missing.
1426 msng_filenode_set = {}
1413 msng_filenode_set = {}
1427
1414
1428 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1415 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1429 junk = None
1416 junk = None
1430
1417
1431 # A changeset always belongs to itself, so the changenode lookup
1418 # A changeset always belongs to itself, so the changenode lookup
1432 # function for a changenode is identity.
1419 # function for a changenode is identity.
1433 def identity(x):
1420 def identity(x):
1434 return x
1421 return x
1435
1422
1436 # A function-generating function that sets up the initial environment for
1423 # A function-generating function that sets up the initial environment for
1437 # the inner function.
1424 # the inner function.
1438 def filenode_collector(changedfiles):
1425 def filenode_collector(changedfiles):
1439 # This gathers information from each manifestnode included in the
1426 # This gathers information from each manifestnode included in the
1440 # changegroup about which filenodes the manifest node references
1427 # changegroup about which filenodes the manifest node references
1441 # so we can include those in the changegroup too.
1428 # so we can include those in the changegroup too.
1442 #
1429 #
1443 # It also remembers which changenode each filenode belongs to. It
1430 # It also remembers which changenode each filenode belongs to. It
1444 # does this by assuming that a filenode belongs to the changenode that
1431 # does this by assuming that a filenode belongs to the changenode that
1445 # the first manifest referencing it belongs to.
1432 # the first manifest referencing it belongs to.
1446 def collect_msng_filenodes(mnfstnode):
1433 def collect_msng_filenodes(mnfstnode):
1447 r = mnfst.rev(mnfstnode)
1434 r = mnfst.rev(mnfstnode)
1448 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1435 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1449 # If the previous rev is one of the parents,
1436 # If the previous rev is one of the parents,
1450 # we only need to see a diff.
1437 # we only need to see a diff.
1451 deltamf = mnfst.readdelta(mnfstnode)
1438 deltamf = mnfst.readdelta(mnfstnode)
1452 # For each line in the delta
1439 # For each line in the delta
1453 for f, fnode in deltamf.iteritems():
1440 for f, fnode in deltamf.iteritems():
1454 # And if the file is in the list of files we care
1441 # And if the file is in the list of files we care
1455 # about.
1442 # about.
1456 if f in changedfiles:
1443 if f in changedfiles:
1457 # Get the changenode this manifest belongs to
1444 # Get the changenode this manifest belongs to
1458 clnode = msng_mnfst_set[mnfstnode]
1445 clnode = msng_mnfst_set[mnfstnode]
1459 # Create the set of filenodes for the file if
1446 # Create the set of filenodes for the file if
1460 # there isn't one already.
1447 # there isn't one already.
1461 ndset = msng_filenode_set.setdefault(f, {})
1448 ndset = msng_filenode_set.setdefault(f, {})
1462 # And set the filenode's changelog node to the
1449 # And set the filenode's changelog node to the
1463 # manifest's if it hasn't been set already.
1450 # manifest's if it hasn't been set already.
1464 ndset.setdefault(fnode, clnode)
1451 ndset.setdefault(fnode, clnode)
1465 else:
1452 else:
1466 # Otherwise we need a full manifest.
1453 # Otherwise we need a full manifest.
1467 m = mnfst.read(mnfstnode)
1454 m = mnfst.read(mnfstnode)
1468 # For every file we care about.
1455 # For every file we care about.
1469 for f in changedfiles:
1456 for f in changedfiles:
1470 fnode = m.get(f, None)
1457 fnode = m.get(f, None)
1471 # If it's in the manifest
1458 # If it's in the manifest
1472 if fnode is not None:
1459 if fnode is not None:
1473 # See comments above.
1460 # See comments above.
1474 clnode = msng_mnfst_set[mnfstnode]
1461 clnode = msng_mnfst_set[mnfstnode]
1475 ndset = msng_filenode_set.setdefault(f, {})
1462 ndset = msng_filenode_set.setdefault(f, {})
1476 ndset.setdefault(fnode, clnode)
1463 ndset.setdefault(fnode, clnode)
1477 return collect_msng_filenodes
1464 return collect_msng_filenodes
1478
1465
1479 # If we determine that a particular file or manifest node must be a
1466 # If we determine that a particular file or manifest node must be a
1480 # node that the recipient of the changegroup will already have, we can
1467 # node that the recipient of the changegroup will already have, we can
1481 # also assume the recipient will have all the parents. This function
1468 # also assume the recipient will have all the parents. This function
1482 # prunes them from the set of missing nodes.
1469 # prunes them from the set of missing nodes.
1483 def prune(revlog, missingnodes):
1470 def prune(revlog, missingnodes):
1484 hasset = set()
1471 hasset = set()
1485 # If a 'missing' filenode thinks it belongs to a changenode we
1472 # If a 'missing' filenode thinks it belongs to a changenode we
1486 # assume the recipient must have, then the recipient must have
1473 # assume the recipient must have, then the recipient must have
1487 # that filenode.
1474 # that filenode.
1488 for n in missingnodes:
1475 for n in missingnodes:
1489 clrev = revlog.linkrev(revlog.rev(n))
1476 clrev = revlog.linkrev(revlog.rev(n))
1490 if clrev in commonrevs:
1477 if clrev in commonrevs:
1491 hasset.add(n)
1478 hasset.add(n)
1492 for n in hasset:
1479 for n in hasset:
1493 missingnodes.pop(n, None)
1480 missingnodes.pop(n, None)
1494 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1481 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1495 missingnodes.pop(revlog.node(r), None)
1482 missingnodes.pop(revlog.node(r), None)
1496
1483
1497 # Add the nodes that were explicitly requested.
1484 # Add the nodes that were explicitly requested.
1498 def add_extra_nodes(name, nodes):
1485 def add_extra_nodes(name, nodes):
1499 if not extranodes or name not in extranodes:
1486 if not extranodes or name not in extranodes:
1500 return
1487 return
1501
1488
1502 for node, linknode in extranodes[name]:
1489 for node, linknode in extranodes[name]:
1503 if node not in nodes:
1490 if node not in nodes:
1504 nodes[node] = linknode
1491 nodes[node] = linknode
1505
1492
1506 # Now that we have all these utility functions to help out and
1493 # Now that we have all these utility functions to help out and
1507 # logically divide up the task, generate the group.
1494 # logically divide up the task, generate the group.
1508 def gengroup():
1495 def gengroup():
1509 # The set of changed files starts empty.
1496 # The set of changed files starts empty.
1510 changedfiles = set()
1497 changedfiles = set()
1511 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1498 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1512
1499
1513 # Create a changenode group generator that will call our functions
1500 # Create a changenode group generator that will call our functions
1514 # back to lookup the owning changenode and collect information.
1501 # back to lookup the owning changenode and collect information.
1515 group = cl.group(msng_cl_lst, identity, collect)
1502 group = cl.group(msng_cl_lst, identity, collect)
1516 for cnt, chnk in enumerate(group):
1503 for cnt, chnk in enumerate(group):
1517 yield chnk
1504 yield chnk
1518 # revlog.group yields three entries per node, so
1505 # revlog.group yields three entries per node, so
1519 # dividing by 3 gives an approximation of how many
1506 # dividing by 3 gives an approximation of how many
1520 # nodes have been processed.
1507 # nodes have been processed.
1521 self.ui.progress(_('bundling'), cnt / 3,
1508 self.ui.progress(_('bundling'), cnt / 3,
1522 unit=_('changesets'))
1509 unit=_('changesets'))
1523 changecount = cnt / 3
1510 changecount = cnt / 3
1524 self.ui.progress(_('bundling'), None)
1511 self.ui.progress(_('bundling'), None)
1525
1512
1526 prune(mnfst, msng_mnfst_set)
1513 prune(mnfst, msng_mnfst_set)
1527 add_extra_nodes(1, msng_mnfst_set)
1514 add_extra_nodes(1, msng_mnfst_set)
1528 msng_mnfst_lst = msng_mnfst_set.keys()
1515 msng_mnfst_lst = msng_mnfst_set.keys()
1529 # Sort the manifestnodes by revision number.
1516 # Sort the manifestnodes by revision number.
1530 msng_mnfst_lst.sort(key=mnfst.rev)
1517 msng_mnfst_lst.sort(key=mnfst.rev)
1531 # Create a generator for the manifestnodes that calls our lookup
1518 # Create a generator for the manifestnodes that calls our lookup
1532 # and data collection functions back.
1519 # and data collection functions back.
1533 group = mnfst.group(msng_mnfst_lst,
1520 group = mnfst.group(msng_mnfst_lst,
1534 lambda mnode: msng_mnfst_set[mnode],
1521 lambda mnode: msng_mnfst_set[mnode],
1535 filenode_collector(changedfiles))
1522 filenode_collector(changedfiles))
1536 efiles = {}
1523 efiles = {}
1537 for cnt, chnk in enumerate(group):
1524 for cnt, chnk in enumerate(group):
1538 if cnt % 3 == 1:
1525 if cnt % 3 == 1:
1539 mnode = chnk[:20]
1526 mnode = chnk[:20]
1540 efiles.update(mnfst.readdelta(mnode))
1527 efiles.update(mnfst.readdelta(mnode))
1541 yield chnk
1528 yield chnk
1542 # see above comment for why we divide by 3
1529 # see above comment for why we divide by 3
1543 self.ui.progress(_('bundling'), cnt / 3,
1530 self.ui.progress(_('bundling'), cnt / 3,
1544 unit=_('manifests'), total=changecount)
1531 unit=_('manifests'), total=changecount)
1545 self.ui.progress(_('bundling'), None)
1532 self.ui.progress(_('bundling'), None)
1546 efiles = len(efiles)
1533 efiles = len(efiles)
1547
1534
1548 # These are no longer needed; dereference and toss the memory for
1535 # These are no longer needed; dereference and toss the memory for
1549 # them.
1536 # them.
1550 msng_mnfst_lst = None
1537 msng_mnfst_lst = None
1551 msng_mnfst_set.clear()
1538 msng_mnfst_set.clear()
1552
1539
1553 if extranodes:
1540 if extranodes:
1554 for fname in extranodes:
1541 for fname in extranodes:
1555 if isinstance(fname, int):
1542 if isinstance(fname, int):
1556 continue
1543 continue
1557 msng_filenode_set.setdefault(fname, {})
1544 msng_filenode_set.setdefault(fname, {})
1558 changedfiles.add(fname)
1545 changedfiles.add(fname)
1559 # Go through all our files in order sorted by name.
1546 # Go through all our files in order sorted by name.
1560 for idx, fname in enumerate(sorted(changedfiles)):
1547 for idx, fname in enumerate(sorted(changedfiles)):
1561 filerevlog = self.file(fname)
1548 filerevlog = self.file(fname)
1562 if not len(filerevlog):
1549 if not len(filerevlog):
1563 raise util.Abort(_("empty or missing revlog for %s") % fname)
1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1564 # Toss out the filenodes that the recipient isn't really
1551 # Toss out the filenodes that the recipient isn't really
1565 # missing.
1552 # missing.
1566 missingfnodes = msng_filenode_set.pop(fname, {})
1553 missingfnodes = msng_filenode_set.pop(fname, {})
1567 prune(filerevlog, missingfnodes)
1554 prune(filerevlog, missingfnodes)
1568 add_extra_nodes(fname, missingfnodes)
1555 add_extra_nodes(fname, missingfnodes)
1569 # If any filenodes are left, generate the group for them,
1556 # If any filenodes are left, generate the group for them,
1570 # otherwise don't bother.
1557 # otherwise don't bother.
1571 if missingfnodes:
1558 if missingfnodes:
1572 yield changegroup.chunkheader(len(fname))
1559 yield changegroup.chunkheader(len(fname))
1573 yield fname
1560 yield fname
1574 # Sort the filenodes by their revision # (topological order)
1561 # Sort the filenodes by their revision # (topological order)
1575 nodeiter = list(missingfnodes)
1562 nodeiter = list(missingfnodes)
1576 nodeiter.sort(key=filerevlog.rev)
1563 nodeiter.sort(key=filerevlog.rev)
1577 # Create a group generator and only pass in a changenode
1564 # Create a group generator and only pass in a changenode
1578 # lookup function as we need to collect no information
1565 # lookup function as we need to collect no information
1579 # from filenodes.
1566 # from filenodes.
1580 group = filerevlog.group(nodeiter,
1567 group = filerevlog.group(nodeiter,
1581 lambda fnode: missingfnodes[fnode])
1568 lambda fnode: missingfnodes[fnode])
1582 for chnk in group:
1569 for chnk in group:
1583 # even though we print the same progress on
1570 # even though we print the same progress on
1584 # most loop iterations, put the progress call
1571 # most loop iterations, put the progress call
1585 # here so that time estimates (if any) can be updated
1572 # here so that time estimates (if any) can be updated
1586 self.ui.progress(
1573 self.ui.progress(
1587 _('bundling'), idx, item=fname,
1574 _('bundling'), idx, item=fname,
1588 unit=_('files'), total=efiles)
1575 unit=_('files'), total=efiles)
1589 yield chnk
1576 yield chnk
1590 # Signal that no more groups are left.
1577 # Signal that no more groups are left.
1591 yield changegroup.closechunk()
1578 yield changegroup.closechunk()
1592 self.ui.progress(_('bundling'), None)
1579 self.ui.progress(_('bundling'), None)
1593
1580
1594 if msng_cl_lst:
1581 if msng_cl_lst:
1595 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1582 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1596
1583
1597 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1584 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1598
1585
1599 def changegroup(self, basenodes, source):
1586 def changegroup(self, basenodes, source):
1600 # to avoid a race we use changegroupsubset() (issue1320)
1587 # to avoid a race we use changegroupsubset() (issue1320)
1601 return self.changegroupsubset(basenodes, self.heads(), source)
1588 return self.changegroupsubset(basenodes, self.heads(), source)
1602
1589
1603 def _changegroup(self, nodes, source):
1590 def _changegroup(self, nodes, source):
1604 """Compute the changegroup of all nodes that we have that a recipient
1591 """Compute the changegroup of all nodes that we have that a recipient
1605 doesn't. Return a chunkbuffer object whose read() method will return
1592 doesn't. Return a chunkbuffer object whose read() method will return
1606 successive changegroup chunks.
1593 successive changegroup chunks.
1607
1594
1608 This is much easier than the previous function as we can assume that
1595 This is much easier than the previous function as we can assume that
1609 the recipient has any changenode we aren't sending them.
1596 the recipient has any changenode we aren't sending them.
1610
1597
1611 nodes is the set of nodes to send"""
1598 nodes is the set of nodes to send"""
1612
1599
1613 self.hook('preoutgoing', throw=True, source=source)
1600 self.hook('preoutgoing', throw=True, source=source)
1614
1601
1615 cl = self.changelog
1602 cl = self.changelog
1616 revset = set([cl.rev(n) for n in nodes])
1603 revset = set([cl.rev(n) for n in nodes])
1617 self.changegroupinfo(nodes, source)
1604 self.changegroupinfo(nodes, source)
1618
1605
1619 def identity(x):
1606 def identity(x):
1620 return x
1607 return x
1621
1608
1622 def gennodelst(log):
1609 def gennodelst(log):
1623 for r in log:
1610 for r in log:
1624 if log.linkrev(r) in revset:
1611 if log.linkrev(r) in revset:
1625 yield log.node(r)
1612 yield log.node(r)
1626
1613
1627 def lookuplinkrev_func(revlog):
1614 def lookuplinkrev_func(revlog):
1628 def lookuplinkrev(n):
1615 def lookuplinkrev(n):
1629 return cl.node(revlog.linkrev(revlog.rev(n)))
1616 return cl.node(revlog.linkrev(revlog.rev(n)))
1630 return lookuplinkrev
1617 return lookuplinkrev
1631
1618
1632 def gengroup():
1619 def gengroup():
1633 '''yield a sequence of changegroup chunks (strings)'''
1620 '''yield a sequence of changegroup chunks (strings)'''
1634 # construct a list of all changed files
1621 # construct a list of all changed files
1635 changedfiles = set()
1622 changedfiles = set()
1636 mmfs = {}
1623 mmfs = {}
1637 collect = changegroup.collector(cl, mmfs, changedfiles)
1624 collect = changegroup.collector(cl, mmfs, changedfiles)
1638
1625
1639 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1626 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1640 # revlog.group yields three entries per node, so
1627 # revlog.group yields three entries per node, so
1641 # dividing by 3 gives an approximation of how many
1628 # dividing by 3 gives an approximation of how many
1642 # nodes have been processed.
1629 # nodes have been processed.
1643 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1630 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1644 yield chnk
1631 yield chnk
1645 changecount = cnt / 3
1632 changecount = cnt / 3
1646 self.ui.progress(_('bundling'), None)
1633 self.ui.progress(_('bundling'), None)
1647
1634
1648 mnfst = self.manifest
1635 mnfst = self.manifest
1649 nodeiter = gennodelst(mnfst)
1636 nodeiter = gennodelst(mnfst)
1650 efiles = {}
1637 efiles = {}
1651 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1638 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1652 lookuplinkrev_func(mnfst))):
1639 lookuplinkrev_func(mnfst))):
1653 if cnt % 3 == 1:
1640 if cnt % 3 == 1:
1654 mnode = chnk[:20]
1641 mnode = chnk[:20]
1655 efiles.update(mnfst.readdelta(mnode))
1642 efiles.update(mnfst.readdelta(mnode))
1656 # see above comment for why we divide by 3
1643 # see above comment for why we divide by 3
1657 self.ui.progress(_('bundling'), cnt / 3,
1644 self.ui.progress(_('bundling'), cnt / 3,
1658 unit=_('manifests'), total=changecount)
1645 unit=_('manifests'), total=changecount)
1659 yield chnk
1646 yield chnk
1660 efiles = len(efiles)
1647 efiles = len(efiles)
1661 self.ui.progress(_('bundling'), None)
1648 self.ui.progress(_('bundling'), None)
1662
1649
1663 for idx, fname in enumerate(sorted(changedfiles)):
1650 for idx, fname in enumerate(sorted(changedfiles)):
1664 filerevlog = self.file(fname)
1651 filerevlog = self.file(fname)
1665 if not len(filerevlog):
1652 if not len(filerevlog):
1666 raise util.Abort(_("empty or missing revlog for %s") % fname)
1653 raise util.Abort(_("empty or missing revlog for %s") % fname)
1667 nodeiter = gennodelst(filerevlog)
1654 nodeiter = gennodelst(filerevlog)
1668 nodeiter = list(nodeiter)
1655 nodeiter = list(nodeiter)
1669 if nodeiter:
1656 if nodeiter:
1670 yield changegroup.chunkheader(len(fname))
1657 yield changegroup.chunkheader(len(fname))
1671 yield fname
1658 yield fname
1672 lookup = lookuplinkrev_func(filerevlog)
1659 lookup = lookuplinkrev_func(filerevlog)
1673 for chnk in filerevlog.group(nodeiter, lookup):
1660 for chnk in filerevlog.group(nodeiter, lookup):
1674 self.ui.progress(
1661 self.ui.progress(
1675 _('bundling'), idx, item=fname,
1662 _('bundling'), idx, item=fname,
1676 total=efiles, unit=_('files'))
1663 total=efiles, unit=_('files'))
1677 yield chnk
1664 yield chnk
1678 self.ui.progress(_('bundling'), None)
1665 self.ui.progress(_('bundling'), None)
1679
1666
1680 yield changegroup.closechunk()
1667 yield changegroup.closechunk()
1681
1668
1682 if nodes:
1669 if nodes:
1683 self.hook('outgoing', node=hex(nodes[0]), source=source)
1670 self.hook('outgoing', node=hex(nodes[0]), source=source)
1684
1671
1685 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1672 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1686
1673
1687 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1674 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1688 """Add the changegroup returned by source.read() to this repo.
1675 """Add the changegroup returned by source.read() to this repo.
1689 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1676 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1690 the URL of the repo where this changegroup is coming from.
1677 the URL of the repo where this changegroup is coming from.
1691
1678
1692 Return an integer summarizing the change to this repo:
1679 Return an integer summarizing the change to this repo:
1693 - nothing changed or no source: 0
1680 - nothing changed or no source: 0
1694 - more heads than before: 1+added heads (2..n)
1681 - more heads than before: 1+added heads (2..n)
1695 - fewer heads than before: -1-removed heads (-2..-n)
1682 - fewer heads than before: -1-removed heads (-2..-n)
1696 - number of heads stays the same: 1
1683 - number of heads stays the same: 1
1697 """
1684 """
1698 def csmap(x):
1685 def csmap(x):
1699 self.ui.debug("add changeset %s\n" % short(x))
1686 self.ui.debug("add changeset %s\n" % short(x))
1700 return len(cl)
1687 return len(cl)
1701
1688
1702 def revmap(x):
1689 def revmap(x):
1703 return cl.rev(x)
1690 return cl.rev(x)
1704
1691
1705 if not source:
1692 if not source:
1706 return 0
1693 return 0
1707
1694
1708 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1695 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1709
1696
1710 changesets = files = revisions = 0
1697 changesets = files = revisions = 0
1711 efiles = set()
1698 efiles = set()
1712
1699
1713 # write changelog data to temp files so concurrent readers will not see
1700 # write changelog data to temp files so concurrent readers will not see
1714 # an inconsistent view
1701 # an inconsistent view
1715 cl = self.changelog
1702 cl = self.changelog
1716 cl.delayupdate()
1703 cl.delayupdate()
1717 oldheads = len(cl.heads())
1704 oldheads = len(cl.heads())
1718
1705
1719 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1706 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1720 try:
1707 try:
1721 trp = weakref.proxy(tr)
1708 trp = weakref.proxy(tr)
1722 # pull off the changeset group
1709 # pull off the changeset group
1723 self.ui.status(_("adding changesets\n"))
1710 self.ui.status(_("adding changesets\n"))
1724 clstart = len(cl)
1711 clstart = len(cl)
1725 class prog(object):
1712 class prog(object):
1726 step = _('changesets')
1713 step = _('changesets')
1727 count = 1
1714 count = 1
1728 ui = self.ui
1715 ui = self.ui
1729 total = None
1716 total = None
1730 def __call__(self):
1717 def __call__(self):
1731 self.ui.progress(self.step, self.count, unit=_('chunks'),
1718 self.ui.progress(self.step, self.count, unit=_('chunks'),
1732 total=self.total)
1719 total=self.total)
1733 self.count += 1
1720 self.count += 1
1734 pr = prog()
1721 pr = prog()
1735 source.callback = pr
1722 source.callback = pr
1736
1723
1737 if (cl.addgroup(source, csmap, trp) is None
1724 if (cl.addgroup(source, csmap, trp) is None
1738 and not emptyok):
1725 and not emptyok):
1739 raise util.Abort(_("received changelog group is empty"))
1726 raise util.Abort(_("received changelog group is empty"))
1740 clend = len(cl)
1727 clend = len(cl)
1741 changesets = clend - clstart
1728 changesets = clend - clstart
1742 for c in xrange(clstart, clend):
1729 for c in xrange(clstart, clend):
1743 efiles.update(self[c].files())
1730 efiles.update(self[c].files())
1744 efiles = len(efiles)
1731 efiles = len(efiles)
1745 self.ui.progress(_('changesets'), None)
1732 self.ui.progress(_('changesets'), None)
1746
1733
1747 # pull off the manifest group
1734 # pull off the manifest group
1748 self.ui.status(_("adding manifests\n"))
1735 self.ui.status(_("adding manifests\n"))
1749 pr.step = _('manifests')
1736 pr.step = _('manifests')
1750 pr.count = 1
1737 pr.count = 1
1751 pr.total = changesets # manifests <= changesets
1738 pr.total = changesets # manifests <= changesets
1752 # no need to check for empty manifest group here:
1739 # no need to check for empty manifest group here:
1753 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1740 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1754 # no new manifest will be created and the manifest group will
1741 # no new manifest will be created and the manifest group will
1755 # be empty during the pull
1742 # be empty during the pull
1756 self.manifest.addgroup(source, revmap, trp)
1743 self.manifest.addgroup(source, revmap, trp)
1757 self.ui.progress(_('manifests'), None)
1744 self.ui.progress(_('manifests'), None)
1758
1745
1759 needfiles = {}
1746 needfiles = {}
1760 if self.ui.configbool('server', 'validate', default=False):
1747 if self.ui.configbool('server', 'validate', default=False):
1761 # validate incoming csets have their manifests
1748 # validate incoming csets have their manifests
1762 for cset in xrange(clstart, clend):
1749 for cset in xrange(clstart, clend):
1763 mfest = self.changelog.read(self.changelog.node(cset))[0]
1750 mfest = self.changelog.read(self.changelog.node(cset))[0]
1764 mfest = self.manifest.readdelta(mfest)
1751 mfest = self.manifest.readdelta(mfest)
1765 # store file nodes we must see
1752 # store file nodes we must see
1766 for f, n in mfest.iteritems():
1753 for f, n in mfest.iteritems():
1767 needfiles.setdefault(f, set()).add(n)
1754 needfiles.setdefault(f, set()).add(n)
1768
1755
1769 # process the files
1756 # process the files
1770 self.ui.status(_("adding file changes\n"))
1757 self.ui.status(_("adding file changes\n"))
1771 pr.step = 'files'
1758 pr.step = 'files'
1772 pr.count = 1
1759 pr.count = 1
1773 pr.total = efiles
1760 pr.total = efiles
1774 source.callback = None
1761 source.callback = None
1775
1762
1776 while 1:
1763 while 1:
1777 f = source.chunk()
1764 f = source.chunk()
1778 if not f:
1765 if not f:
1779 break
1766 break
1780 self.ui.debug("adding %s revisions\n" % f)
1767 self.ui.debug("adding %s revisions\n" % f)
1781 pr()
1768 pr()
1782 fl = self.file(f)
1769 fl = self.file(f)
1783 o = len(fl)
1770 o = len(fl)
1784 if fl.addgroup(source, revmap, trp) is None:
1771 if fl.addgroup(source, revmap, trp) is None:
1785 raise util.Abort(_("received file revlog group is empty"))
1772 raise util.Abort(_("received file revlog group is empty"))
1786 revisions += len(fl) - o
1773 revisions += len(fl) - o
1787 files += 1
1774 files += 1
1788 if f in needfiles:
1775 if f in needfiles:
1789 needs = needfiles[f]
1776 needs = needfiles[f]
1790 for new in xrange(o, len(fl)):
1777 for new in xrange(o, len(fl)):
1791 n = fl.node(new)
1778 n = fl.node(new)
1792 if n in needs:
1779 if n in needs:
1793 needs.remove(n)
1780 needs.remove(n)
1794 if not needs:
1781 if not needs:
1795 del needfiles[f]
1782 del needfiles[f]
1796 self.ui.progress(_('files'), None)
1783 self.ui.progress(_('files'), None)
1797
1784
1798 for f, needs in needfiles.iteritems():
1785 for f, needs in needfiles.iteritems():
1799 fl = self.file(f)
1786 fl = self.file(f)
1800 for n in needs:
1787 for n in needs:
1801 try:
1788 try:
1802 fl.rev(n)
1789 fl.rev(n)
1803 except error.LookupError:
1790 except error.LookupError:
1804 raise util.Abort(
1791 raise util.Abort(
1805 _('missing file data for %s:%s - run hg verify') %
1792 _('missing file data for %s:%s - run hg verify') %
1806 (f, hex(n)))
1793 (f, hex(n)))
1807
1794
1808 newheads = len(cl.heads())
1795 newheads = len(cl.heads())
1809 heads = ""
1796 heads = ""
1810 if oldheads and newheads != oldheads:
1797 if oldheads and newheads != oldheads:
1811 heads = _(" (%+d heads)") % (newheads - oldheads)
1798 heads = _(" (%+d heads)") % (newheads - oldheads)
1812
1799
1813 self.ui.status(_("added %d changesets"
1800 self.ui.status(_("added %d changesets"
1814 " with %d changes to %d files%s\n")
1801 " with %d changes to %d files%s\n")
1815 % (changesets, revisions, files, heads))
1802 % (changesets, revisions, files, heads))
1816
1803
1817 if changesets > 0:
1804 if changesets > 0:
1818 p = lambda: cl.writepending() and self.root or ""
1805 p = lambda: cl.writepending() and self.root or ""
1819 self.hook('pretxnchangegroup', throw=True,
1806 self.hook('pretxnchangegroup', throw=True,
1820 node=hex(cl.node(clstart)), source=srctype,
1807 node=hex(cl.node(clstart)), source=srctype,
1821 url=url, pending=p)
1808 url=url, pending=p)
1822
1809
1823 # make changelog see real files again
1810 # make changelog see real files again
1824 cl.finalize(trp)
1811 cl.finalize(trp)
1825
1812
1826 tr.close()
1813 tr.close()
1827 finally:
1814 finally:
1828 tr.release()
1815 tr.release()
1829 if lock:
1816 if lock:
1830 lock.release()
1817 lock.release()
1831
1818
1832 if changesets > 0:
1819 if changesets > 0:
1833 # forcefully update the on-disk branch cache
1820 # forcefully update the on-disk branch cache
1834 self.ui.debug("updating the branch cache\n")
1821 self.ui.debug("updating the branch cache\n")
1835 self.updatebranchcache()
1822 self.updatebranchcache()
1836 self.hook("changegroup", node=hex(cl.node(clstart)),
1823 self.hook("changegroup", node=hex(cl.node(clstart)),
1837 source=srctype, url=url)
1824 source=srctype, url=url)
1838
1825
1839 for i in xrange(clstart, clend):
1826 for i in xrange(clstart, clend):
1840 self.hook("incoming", node=hex(cl.node(i)),
1827 self.hook("incoming", node=hex(cl.node(i)),
1841 source=srctype, url=url)
1828 source=srctype, url=url)
1842
1829
1843 # never return 0 here:
1830 # never return 0 here:
1844 if newheads < oldheads:
1831 if newheads < oldheads:
1845 return newheads - oldheads - 1
1832 return newheads - oldheads - 1
1846 else:
1833 else:
1847 return newheads - oldheads + 1
1834 return newheads - oldheads + 1
1848
1835
1849
1836
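A minimal sketch of how a caller could decode the value returned above, assuming only the convention visible in the code (0 means nothing was added; otherwise the head-count change is offset by one so the result is never 0). The helper name is invented:

def describe_modheads(modheads):
    # 0 only happens on the early "if not source" return
    if modheads == 0:
        return "no changesets added"
    if modheads > 0:
        # success path: heads grew by (modheads - 1), possibly by zero
        return "changesets added, %+d heads" % (modheads - 1)
    # negative: heads shrank by (-modheads - 1)
    return "changesets added, %+d heads" % (modheads + 1)

# describe_modheads(1)  -> 'changesets added, +0 heads'
# describe_modheads(3)  -> 'changesets added, +2 heads'
# describe_modheads(-3) -> 'changesets added, -2 heads'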
1850 def stream_in(self, remote, requirements):
1837 def stream_in(self, remote, requirements):
1851 fp = remote.stream_out()
1838 fp = remote.stream_out()
1852 l = fp.readline()
1839 l = fp.readline()
1853 try:
1840 try:
1854 resp = int(l)
1841 resp = int(l)
1855 except ValueError:
1842 except ValueError:
1856 raise error.ResponseError(
1843 raise error.ResponseError(
1857 _('Unexpected response from remote server:'), l)
1844 _('Unexpected response from remote server:'), l)
1858 if resp == 1:
1845 if resp == 1:
1859 raise util.Abort(_('operation forbidden by server'))
1846 raise util.Abort(_('operation forbidden by server'))
1860 elif resp == 2:
1847 elif resp == 2:
1861 raise util.Abort(_('locking the remote repository failed'))
1848 raise util.Abort(_('locking the remote repository failed'))
1862 elif resp != 0:
1849 elif resp != 0:
1863 raise util.Abort(_('the server sent an unknown error code'))
1850 raise util.Abort(_('the server sent an unknown error code'))
1864 self.ui.status(_('streaming all changes\n'))
1851 self.ui.status(_('streaming all changes\n'))
1865 l = fp.readline()
1852 l = fp.readline()
1866 try:
1853 try:
1867 total_files, total_bytes = map(int, l.split(' ', 1))
1854 total_files, total_bytes = map(int, l.split(' ', 1))
1868 except (ValueError, TypeError):
1855 except (ValueError, TypeError):
1869 raise error.ResponseError(
1856 raise error.ResponseError(
1870 _('Unexpected response from remote server:'), l)
1857 _('Unexpected response from remote server:'), l)
1871 self.ui.status(_('%d files to transfer, %s of data\n') %
1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1872 (total_files, util.bytecount(total_bytes)))
1859 (total_files, util.bytecount(total_bytes)))
1873 start = time.time()
1860 start = time.time()
1874 for i in xrange(total_files):
1861 for i in xrange(total_files):
1875 # XXX doesn't support '\n' or '\r' in filenames
1862 # XXX doesn't support '\n' or '\r' in filenames
1876 l = fp.readline()
1863 l = fp.readline()
1877 try:
1864 try:
1878 name, size = l.split('\0', 1)
1865 name, size = l.split('\0', 1)
1879 size = int(size)
1866 size = int(size)
1880 except (ValueError, TypeError):
1867 except (ValueError, TypeError):
1881 raise error.ResponseError(
1868 raise error.ResponseError(
1882 _('Unexpected response from remote server:'), l)
1869 _('Unexpected response from remote server:'), l)
1883 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1884 # for backwards compat, name was partially encoded
1871 # for backwards compat, name was partially encoded
1885 ofp = self.sopener(store.decodedir(name), 'w')
1872 ofp = self.sopener(store.decodedir(name), 'w')
1886 for chunk in util.filechunkiter(fp, limit=size):
1873 for chunk in util.filechunkiter(fp, limit=size):
1887 ofp.write(chunk)
1874 ofp.write(chunk)
1888 ofp.close()
1875 ofp.close()
1889 elapsed = time.time() - start
1876 elapsed = time.time() - start
1890 if elapsed <= 0:
1877 if elapsed <= 0:
1891 elapsed = 0.001
1878 elapsed = 0.001
1892 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1893 (util.bytecount(total_bytes), elapsed,
1880 (util.bytecount(total_bytes), elapsed,
1894 util.bytecount(total_bytes / elapsed)))
1881 util.bytecount(total_bytes / elapsed)))
1895
1882
1896 # new requirements = old non-format requirements + new format-related
1883 # new requirements = old non-format requirements + new format-related
1897 # requirements from the streamed-in repository
1884 # requirements from the streamed-in repository
1898 requirements.update(set(self.requirements) - self.supportedformats)
1885 requirements.update(set(self.requirements) - self.supportedformats)
1899 self._applyrequirements(requirements)
1886 self._applyrequirements(requirements)
1900 self._writerequirements()
1887 self._writerequirements()
1901
1888
1902 self.invalidate()
1889 self.invalidate()
1903 return len(self.heads()) + 1
1890 return len(self.heads()) + 1
1904
1891
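For orientation, a hedged sketch of the byte layout that stream_in() parses above, inferred only from the parsing code (it is not the protocol implementation itself): a status line, a '<total files> <total bytes>' line, then for each file a newline-terminated '<store path>\0<size>' header followed by exactly <size> bytes of data.

# Illustrative stream payload and the same framing logic as above (Python 2,
# where str doubles as bytes; names and contents are made up).
from io import BytesIO

payload = (
    "0\n"                       # response code: 0 = OK
    "2 13\n"                    # <total files> <total bytes>
    "data/a.i\x00" "7\n"        # <store path> NUL <size>, newline-terminated
    "1234567"                   # exactly 7 bytes of file data
    "data/b.i\x00" "6\n"
    "abcdef"
)
fp = BytesIO(payload)

assert int(fp.readline()) == 0
total_files, total_bytes = map(int, fp.readline().split(' ', 1))
for _i in range(total_files):
    name, size = fp.readline().split('\x00', 1)
    data = fp.read(int(size))
    print('%s: %d bytes' % (name, len(data)))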
1905 def clone(self, remote, heads=[], stream=False):
1892 def clone(self, remote, heads=[], stream=False):
1906 '''clone remote repository.
1893 '''clone remote repository.
1907
1894
1908 keyword arguments:
1895 keyword arguments:
1909 heads: list of revs to clone (forces use of pull)
1896 heads: list of revs to clone (forces use of pull)
1910 stream: use streaming clone if possible'''
1897 stream: use streaming clone if possible'''
1911
1898
1912 # now, all clients that can request uncompressed clones can
1899 # now, all clients that can request uncompressed clones can
1913 # read repo formats supported by all servers that can serve
1900 # read repo formats supported by all servers that can serve
1914 # them.
1901 # them.
1915
1902
1916 # if revlog format changes, client will have to check version
1903 # if revlog format changes, client will have to check version
1917 # and format flags on "stream" capability, and use
1904 # and format flags on "stream" capability, and use
1918 # uncompressed only if compatible.
1905 # uncompressed only if compatible.
1919
1906
1920 if stream and not heads:
1907 if stream and not heads:
1921 # 'stream' means remote revlog format is revlogv1 only
1908 # 'stream' means remote revlog format is revlogv1 only
1922 if remote.capable('stream'):
1909 if remote.capable('stream'):
1923 return self.stream_in(remote, set(('revlogv1',)))
1910 return self.stream_in(remote, set(('revlogv1',)))
1924 # otherwise, 'streamreqs' contains the remote revlog format
1911 # otherwise, 'streamreqs' contains the remote revlog format
1925 streamreqs = remote.capable('streamreqs')
1912 streamreqs = remote.capable('streamreqs')
1926 if streamreqs:
1913 if streamreqs:
1927 streamreqs = set(streamreqs.split(','))
1914 streamreqs = set(streamreqs.split(','))
1928 # if we support it, stream in and adjust our requirements
1915 # if we support it, stream in and adjust our requirements
1929 if not streamreqs - self.supportedformats:
1916 if not streamreqs - self.supportedformats:
1930 return self.stream_in(remote, streamreqs)
1917 return self.stream_in(remote, streamreqs)
1931 return self.pull(remote, heads)
1918 return self.pull(remote, heads)
1932
1919
1933 def pushkey(self, namespace, key, old, new):
1920 def pushkey(self, namespace, key, old, new):
1934 return pushkey.push(self, namespace, key, old, new)
1921 return pushkey.push(self, namespace, key, old, new)
1935
1922
1936 def listkeys(self, namespace):
1923 def listkeys(self, namespace):
1937 return pushkey.list(self, namespace)
1924 return pushkey.list(self, namespace)
1938
1925
1939 # used to avoid circular references so destructors work
1926 # used to avoid circular references so destructors work
1940 def aftertrans(files):
1927 def aftertrans(files):
1941 renamefiles = [tuple(t) for t in files]
1928 renamefiles = [tuple(t) for t in files]
1942 def a():
1929 def a():
1943 for src, dest in renamefiles:
1930 for src, dest in renamefiles:
1944 util.rename(src, dest)
1931 util.rename(src, dest)
1945 return a
1932 return a
1946
1933
1947 def instance(ui, path, create):
1934 def instance(ui, path, create):
1948 return localrepository(ui, util.drop_scheme('file', path), create)
1935 return localrepository(ui, util.drop_scheme('file', path), create)
1949
1936
1950 def islocal(path):
1937 def islocal(path):
1951 return True
1938 return True
@@ -1,891 +1,886 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
9 import stat, subprocess, tarfile
9 import stat, subprocess, tarfile
10 from i18n import _
10 from i18n import _
11 import config, util, node, error, cmdutil
11 import config, util, node, error, cmdutil
12 hg = None
12 hg = None
13
13
14 nullstate = ('', '', 'empty')
14 nullstate = ('', '', 'empty')
15
15
16
17 def substate(ctx):
18 rev = {}
19 if '.hgsubstate' in ctx:
20 try:
21 for l in ctx['.hgsubstate'].data().splitlines():
22 revision, path = l.split(" ", 1)
23 rev[path] = revision
24 except IOError, err:
25 if err.errno != errno.ENOENT:
26 raise
27 return rev
28
29 def state(ctx, ui):
16 def state(ctx, ui):
30 """return a state dict, mapping subrepo paths configured in .hgsub
17 """return a state dict, mapping subrepo paths configured in .hgsub
31 to tuple: (source from .hgsub, revision from .hgsubstate, kind
18 to tuple: (source from .hgsub, revision from .hgsubstate, kind
32 (key in types dict))
19 (key in types dict))
33 """
20 """
34 p = config.config()
21 p = config.config()
35 def read(f, sections=None, remap=None):
22 def read(f, sections=None, remap=None):
36 if f in ctx:
23 if f in ctx:
37 try:
24 try:
38 data = ctx[f].data()
25 data = ctx[f].data()
39 except IOError, err:
26 except IOError, err:
40 if err.errno != errno.ENOENT:
27 if err.errno != errno.ENOENT:
41 raise
28 raise
42 # handle missing subrepo spec files as removed
29 # handle missing subrepo spec files as removed
43 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
30 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
44 return
31 return
45 p.parse(f, data, sections, remap, read)
32 p.parse(f, data, sections, remap, read)
46 else:
33 else:
47 raise util.Abort(_("subrepo spec file %s not found") % f)
34 raise util.Abort(_("subrepo spec file %s not found") % f)
48
35
49 if '.hgsub' in ctx:
36 if '.hgsub' in ctx:
50 read('.hgsub')
37 read('.hgsub')
51
38
52 for path, src in ui.configitems('subpaths'):
39 for path, src in ui.configitems('subpaths'):
53 p.set('subpaths', path, src, ui.configsource('subpaths', path))
40 p.set('subpaths', path, src, ui.configsource('subpaths', path))
54
41
55 rev = substate(ctx)
42 rev = {}
43 if '.hgsubstate' in ctx:
44 try:
45 for l in ctx['.hgsubstate'].data().splitlines():
46 revision, path = l.split(" ", 1)
47 rev[path] = revision
48 except IOError, err:
49 if err.errno != errno.ENOENT:
50 raise
56
51
57 state = {}
52 state = {}
58 for path, src in p[''].items():
53 for path, src in p[''].items():
59 kind = 'hg'
54 kind = 'hg'
60 if src.startswith('['):
55 if src.startswith('['):
61 if ']' not in src:
56 if ']' not in src:
62 raise util.Abort(_('missing ] in subrepo source'))
57 raise util.Abort(_('missing ] in subrepo source'))
63 kind, src = src.split(']', 1)
58 kind, src = src.split(']', 1)
64 kind = kind[1:]
59 kind = kind[1:]
65
60
66 for pattern, repl in p.items('subpaths'):
61 for pattern, repl in p.items('subpaths'):
67 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
62 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
68 # does a string decode.
63 # does a string decode.
69 repl = repl.encode('string-escape')
64 repl = repl.encode('string-escape')
70 # However, we still want to allow back references to go
65 # However, we still want to allow back references to go
71 # through unharmed, so we turn r'\\1' into r'\1'. Again,
66 # through unharmed, so we turn r'\\1' into r'\1'. Again,
72 # extra escapes are needed because re.sub string decodes.
67 # extra escapes are needed because re.sub string decodes.
73 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
68 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
74 try:
69 try:
75 src = re.sub(pattern, repl, src, 1)
70 src = re.sub(pattern, repl, src, 1)
76 except re.error, e:
71 except re.error, e:
77 raise util.Abort(_("bad subrepository pattern in %s: %s")
72 raise util.Abort(_("bad subrepository pattern in %s: %s")
78 % (p.source('subpaths', pattern), e))
73 % (p.source('subpaths', pattern), e))
79
74
80 state[path] = (src.strip(), rev.get(path, ''), kind)
75 state[path] = (src.strip(), rev.get(path, ''), kind)
81
76
82 return state
77 return state
83
78
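A hedged example of the two inputs combined above (contents, paths and hashes are invented): .hgsub maps a subrepo path to its source, optionally prefixed with '[kind]', and .hgsubstate stores one 'revision path' line per subrepo; state() folds them into {path: (source, revision, kind)}.

# Invented sample contents, plus the .hgsubstate parse mirroring the loop above.
hgsub = (
    "nested = nested\n"
    "vendor/lib = http://example.com/hg/lib\n"
    "docs = [svn]http://example.com/svn/docs\n"
)
hgsubstate = (
    "26f4c5429cf3a2d1459ab8891ec5aa6cd1ba20b3 nested\n"
    "4d2af5e0eabc4d2af5e0eabc4d2af5e0eabc4d2a vendor/lib\n"
    "105 docs\n"
)

rev = {}
for l in hgsubstate.splitlines():
    revision, path = l.split(" ", 1)
    rev[path] = revision

# Expected shape of the resulting state dict:
# {'nested':     ('nested',                      '26f4c5...', 'hg'),
#  'vendor/lib': ('http://example.com/hg/lib',   '4d2af5...', 'hg'),
#  'docs':       ('http://example.com/svn/docs', '105',       'svn')}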
84 def writestate(repo, state):
79 def writestate(repo, state):
85 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
80 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
86 repo.wwrite('.hgsubstate',
81 repo.wwrite('.hgsubstate',
87 ''.join(['%s %s\n' % (state[s][1], s)
82 ''.join(['%s %s\n' % (state[s][1], s)
88 for s in sorted(state)]), '')
83 for s in sorted(state)]), '')
89
84
90 def submerge(repo, wctx, mctx, actx):
85 def submerge(repo, wctx, mctx, actx):
91 """delegated from merge.applyupdates: merging of .hgsubstate file
86 """delegated from merge.applyupdates: merging of .hgsubstate file
92 in working context, merging context and ancestor context"""
87 in working context, merging context and ancestor context"""
93 if mctx == actx: # backwards?
88 if mctx == actx: # backwards?
94 actx = wctx.p1()
89 actx = wctx.p1()
95 s1 = wctx.substate
90 s1 = wctx.substate
96 s2 = mctx.substate
91 s2 = mctx.substate
97 sa = actx.substate
92 sa = actx.substate
98 sm = {}
93 sm = {}
99
94
100 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
95 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
101
96
102 def debug(s, msg, r=""):
97 def debug(s, msg, r=""):
103 if r:
98 if r:
104 r = "%s:%s:%s" % r
99 r = "%s:%s:%s" % r
105 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
100 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
106
101
107 for s, l in s1.items():
102 for s, l in s1.items():
108 a = sa.get(s, nullstate)
103 a = sa.get(s, nullstate)
109 ld = l # local state with possible dirty flag for compares
104 ld = l # local state with possible dirty flag for compares
110 if wctx.sub(s).dirty():
105 if wctx.sub(s).dirty():
111 ld = (l[0], l[1] + "+")
106 ld = (l[0], l[1] + "+")
112 if wctx == actx: # overwrite
107 if wctx == actx: # overwrite
113 a = ld
108 a = ld
114
109
115 if s in s2:
110 if s in s2:
116 r = s2[s]
111 r = s2[s]
117 if ld == r or r == a: # no change or local is newer
112 if ld == r or r == a: # no change or local is newer
118 sm[s] = l
113 sm[s] = l
119 continue
114 continue
120 elif ld == a: # other side changed
115 elif ld == a: # other side changed
121 debug(s, "other changed, get", r)
116 debug(s, "other changed, get", r)
122 wctx.sub(s).get(r)
117 wctx.sub(s).get(r)
123 sm[s] = r
118 sm[s] = r
124 elif ld[0] != r[0]: # sources differ
119 elif ld[0] != r[0]: # sources differ
125 if repo.ui.promptchoice(
120 if repo.ui.promptchoice(
126 _(' subrepository sources for %s differ\n'
121 _(' subrepository sources for %s differ\n'
127 'use (l)ocal source (%s) or (r)emote source (%s)?')
122 'use (l)ocal source (%s) or (r)emote source (%s)?')
128 % (s, l[0], r[0]),
123 % (s, l[0], r[0]),
129 (_('&Local'), _('&Remote')), 0):
124 (_('&Local'), _('&Remote')), 0):
130 debug(s, "prompt changed, get", r)
125 debug(s, "prompt changed, get", r)
131 wctx.sub(s).get(r)
126 wctx.sub(s).get(r)
132 sm[s] = r
127 sm[s] = r
133 elif ld[1] == a[1]: # local side is unchanged
128 elif ld[1] == a[1]: # local side is unchanged
134 debug(s, "other side changed, get", r)
129 debug(s, "other side changed, get", r)
135 wctx.sub(s).get(r)
130 wctx.sub(s).get(r)
136 sm[s] = r
131 sm[s] = r
137 else:
132 else:
138 debug(s, "both sides changed, merge with", r)
133 debug(s, "both sides changed, merge with", r)
139 wctx.sub(s).merge(r)
134 wctx.sub(s).merge(r)
140 sm[s] = l
135 sm[s] = l
141 elif ld == a: # remote removed, local unchanged
136 elif ld == a: # remote removed, local unchanged
142 debug(s, "remote removed, remove")
137 debug(s, "remote removed, remove")
143 wctx.sub(s).remove()
138 wctx.sub(s).remove()
144 else:
139 else:
145 if repo.ui.promptchoice(
140 if repo.ui.promptchoice(
146 _(' local changed subrepository %s which remote removed\n'
141 _(' local changed subrepository %s which remote removed\n'
147 'use (c)hanged version or (d)elete?') % s,
142 'use (c)hanged version or (d)elete?') % s,
148 (_('&Changed'), _('&Delete')), 0):
143 (_('&Changed'), _('&Delete')), 0):
149 debug(s, "prompt remove")
144 debug(s, "prompt remove")
150 wctx.sub(s).remove()
145 wctx.sub(s).remove()
151
146
152 for s, r in s2.items():
147 for s, r in s2.items():
153 if s in s1:
148 if s in s1:
154 continue
149 continue
155 elif s not in sa:
150 elif s not in sa:
156 debug(s, "remote added, get", r)
151 debug(s, "remote added, get", r)
157 mctx.sub(s).get(r)
152 mctx.sub(s).get(r)
158 sm[s] = r
153 sm[s] = r
159 elif r != sa[s]:
154 elif r != sa[s]:
160 if repo.ui.promptchoice(
155 if repo.ui.promptchoice(
161 _(' remote changed subrepository %s which local removed\n'
156 _(' remote changed subrepository %s which local removed\n'
162 'use (c)hanged version or (d)elete?') % s,
157 'use (c)hanged version or (d)elete?') % s,
163 (_('&Changed'), _('&Delete')), 0) == 0:
158 (_('&Changed'), _('&Delete')), 0) == 0:
164 debug(s, "prompt recreate", r)
159 debug(s, "prompt recreate", r)
165 wctx.sub(s).get(r)
160 wctx.sub(s).get(r)
166 sm[s] = r
161 sm[s] = r
167
162
168 # record merged .hgsubstate
163 # record merged .hgsubstate
169 writestate(repo, sm)
164 writestate(repo, sm)
170
165
171 def reporelpath(repo):
166 def reporelpath(repo):
172 """return path to this (sub)repo as seen from outermost repo"""
167 """return path to this (sub)repo as seen from outermost repo"""
173 parent = repo
168 parent = repo
174 while hasattr(parent, '_subparent'):
169 while hasattr(parent, '_subparent'):
175 parent = parent._subparent
170 parent = parent._subparent
176 return repo.root[len(parent.root)+1:]
171 return repo.root[len(parent.root)+1:]
177
172
178 def subrelpath(sub):
173 def subrelpath(sub):
179 """return path to this subrepo as seen from outermost repo"""
174 """return path to this subrepo as seen from outermost repo"""
180 if not hasattr(sub, '_repo'):
175 if not hasattr(sub, '_repo'):
181 return sub._path
176 return sub._path
182 return reporelpath(sub._repo)
177 return reporelpath(sub._repo)
183
178
184 def _abssource(repo, push=False, abort=True):
179 def _abssource(repo, push=False, abort=True):
185 """return pull/push path of repo - either based on parent repo .hgsub info
180 """return pull/push path of repo - either based on parent repo .hgsub info
186 or on the top repo config. Abort or return None if no source found."""
181 or on the top repo config. Abort or return None if no source found."""
187 if hasattr(repo, '_subparent'):
182 if hasattr(repo, '_subparent'):
188 source = repo._subsource
183 source = repo._subsource
189 if source.startswith('/') or '://' in source:
184 if source.startswith('/') or '://' in source:
190 return source
185 return source
191 parent = _abssource(repo._subparent, push, abort=False)
186 parent = _abssource(repo._subparent, push, abort=False)
192 if parent:
187 if parent:
193 if '://' in parent:
188 if '://' in parent:
194 if parent[-1] == '/':
189 if parent[-1] == '/':
195 parent = parent[:-1]
190 parent = parent[:-1]
196 r = urlparse.urlparse(parent + '/' + source)
191 r = urlparse.urlparse(parent + '/' + source)
197 r = urlparse.urlunparse((r[0], r[1],
192 r = urlparse.urlunparse((r[0], r[1],
198 posixpath.normpath(r[2]),
193 posixpath.normpath(r[2]),
199 r[3], r[4], r[5]))
194 r[3], r[4], r[5]))
200 return r
195 return r
201 else: # plain file system path
196 else: # plain file system path
202 return posixpath.normpath(os.path.join(parent, repo._subsource))
197 return posixpath.normpath(os.path.join(parent, repo._subsource))
203 else: # recursion reached top repo
198 else: # recursion reached top repo
204 if hasattr(repo, '_subtoppath'):
199 if hasattr(repo, '_subtoppath'):
205 return repo._subtoppath
200 return repo._subtoppath
206 if push and repo.ui.config('paths', 'default-push'):
201 if push and repo.ui.config('paths', 'default-push'):
207 return repo.ui.config('paths', 'default-push')
202 return repo.ui.config('paths', 'default-push')
208 if repo.ui.config('paths', 'default'):
203 if repo.ui.config('paths', 'default'):
209 return repo.ui.config('paths', 'default')
204 return repo.ui.config('paths', 'default')
210 if abort:
205 if abort:
211 raise util.Abort(_("default path for subrepository %s not found") %
206 raise util.Abort(_("default path for subrepository %s not found") %
212 reporelpath(repo))
207 reporelpath(repo))
213
208
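A hedged sketch of the resolution rule in _abssource for a nested subrepo whose source is relative: join it onto the parent's resolved source, URL-style for remote parents and path-style for local ones. The helper and example values are invented, and the real code uses os.path.join for the local case.

import posixpath
import urlparse  # Python 2 module, as imported by this file

def join_source(parent, source):
    if source.startswith('/') or '://' in source:
        return source                      # already absolute, use as-is
    if '://' in parent:
        if parent.endswith('/'):
            parent = parent[:-1]
        r = urlparse.urlparse(parent + '/' + source)
        return urlparse.urlunparse((r[0], r[1], posixpath.normpath(r[2]),
                                    r[3], r[4], r[5]))
    return posixpath.normpath(posixpath.join(parent, source))

# join_source('http://example.com/hg/outer', 'libs/inner')
#   -> 'http://example.com/hg/outer/libs/inner'
# join_source('/srv/hg/outer', 'libs/inner')
#   -> '/srv/hg/outer/libs/inner'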
214 def itersubrepos(ctx1, ctx2):
209 def itersubrepos(ctx1, ctx2):
215 """find subrepos in ctx1 or ctx2"""
210 """find subrepos in ctx1 or ctx2"""
216 # Create a (subpath, ctx) mapping where we prefer subpaths from
211 # Create a (subpath, ctx) mapping where we prefer subpaths from
217 # ctx1. The subpaths from ctx2 are important when the .hgsub file
212 # ctx1. The subpaths from ctx2 are important when the .hgsub file
218 # has been modified (in ctx2) but not yet committed (in ctx1).
213 # has been modified (in ctx2) but not yet committed (in ctx1).
219 subpaths = dict.fromkeys(ctx2.substate, ctx2)
214 subpaths = dict.fromkeys(ctx2.substate, ctx2)
220 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
215 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
221 for subpath, ctx in sorted(subpaths.iteritems()):
216 for subpath, ctx in sorted(subpaths.iteritems()):
222 yield subpath, ctx.sub(subpath)
217 yield subpath, ctx.sub(subpath)
223
218
224 def subrepo(ctx, path):
219 def subrepo(ctx, path):
225 """return instance of the right subrepo class for subrepo in path"""
220 """return instance of the right subrepo class for subrepo in path"""
226 # subrepo inherently violates our import layering rules
221 # subrepo inherently violates our import layering rules
227 # because it wants to make repo objects from deep inside the stack
222 # because it wants to make repo objects from deep inside the stack
228 # so we manually delay the circular imports to not break
223 # so we manually delay the circular imports to not break
229 # scripts that don't use our demand-loading
224 # scripts that don't use our demand-loading
230 global hg
225 global hg
231 import hg as h
226 import hg as h
232 hg = h
227 hg = h
233
228
234 util.path_auditor(ctx._repo.root)(path)
229 util.path_auditor(ctx._repo.root)(path)
235 state = ctx.substate.get(path, nullstate)
230 state = ctx.substate.get(path, nullstate)
236 if state[2] not in types:
231 if state[2] not in types:
237 raise util.Abort(_('unknown subrepo type %s') % state[2])
232 raise util.Abort(_('unknown subrepo type %s') % state[2])
238 return types[state[2]](ctx, path, state[:2])
233 return types[state[2]](ctx, path, state[:2])
239
234
240 # subrepo classes need to implement the following abstract class:
235 # subrepo classes need to implement the following abstract class:
241
236
242 class abstractsubrepo(object):
237 class abstractsubrepo(object):
243
238
244 def dirty(self):
239 def dirty(self):
245 """returns true if the dirstate of the subrepo does not match
240 """returns true if the dirstate of the subrepo does not match
246 current stored state
241 current stored state
247 """
242 """
248 raise NotImplementedError
243 raise NotImplementedError
249
244
250 def checknested(self, path):
245 def checknested(self, path):
251 """check if path is a subrepository within this repository"""
246 """check if path is a subrepository within this repository"""
252 return False
247 return False
253
248
254 def commit(self, text, user, date):
249 def commit(self, text, user, date):
255 """commit the current changes to the subrepo with the given
250 """commit the current changes to the subrepo with the given
256 log message. Use given user and date if possible. Return the
251 log message. Use given user and date if possible. Return the
257 new state of the subrepo.
252 new state of the subrepo.
258 """
253 """
259 raise NotImplementedError
254 raise NotImplementedError
260
255
261 def remove(self):
256 def remove(self):
262 """remove the subrepo
257 """remove the subrepo
263
258
264 (should verify the dirstate is not dirty first)
259 (should verify the dirstate is not dirty first)
265 """
260 """
266 raise NotImplementedError
261 raise NotImplementedError
267
262
268 def get(self, state):
263 def get(self, state):
269 """run whatever commands are needed to put the subrepo into
264 """run whatever commands are needed to put the subrepo into
270 this state
265 this state
271 """
266 """
272 raise NotImplementedError
267 raise NotImplementedError
273
268
274 def merge(self, state):
269 def merge(self, state):
275 """merge currently-saved state with the new state."""
270 """merge currently-saved state with the new state."""
276 raise NotImplementedError
271 raise NotImplementedError
277
272
278 def push(self, force):
273 def push(self, force):
279 """perform whatever action is analogous to 'hg push'
274 """perform whatever action is analogous to 'hg push'
280
275
281 This may be a no-op on some systems.
276 This may be a no-op on some systems.
282 """
277 """
283 raise NotImplementedError
278 raise NotImplementedError
284
279
285 def add(self, ui, match, dryrun, prefix):
280 def add(self, ui, match, dryrun, prefix):
286 return []
281 return []
287
282
288 def status(self, rev2, **opts):
283 def status(self, rev2, **opts):
289 return [], [], [], [], [], [], []
284 return [], [], [], [], [], [], []
290
285
291 def diff(self, diffopts, node2, match, prefix, **opts):
286 def diff(self, diffopts, node2, match, prefix, **opts):
292 pass
287 pass
293
288
294 def outgoing(self, ui, dest, opts):
289 def outgoing(self, ui, dest, opts):
295 return 1
290 return 1
296
291
297 def incoming(self, ui, source, opts):
292 def incoming(self, ui, source, opts):
298 return 1
293 return 1
299
294
300 def files(self):
295 def files(self):
301 """return filename iterator"""
296 """return filename iterator"""
302 raise NotImplementedError
297 raise NotImplementedError
303
298
304 def filedata(self, name):
299 def filedata(self, name):
305 """return file data"""
300 """return file data"""
306 raise NotImplementedError
301 raise NotImplementedError
307
302
308 def fileflags(self, name):
303 def fileflags(self, name):
309 """return file flags"""
304 """return file flags"""
310 return ''
305 return ''
311
306
312 def archive(self, ui, archiver, prefix):
307 def archive(self, ui, archiver, prefix):
313 files = self.files()
308 files = self.files()
314 total = len(files)
309 total = len(files)
315 relpath = subrelpath(self)
310 relpath = subrelpath(self)
316 ui.progress(_('archiving (%s)') % relpath, 0,
311 ui.progress(_('archiving (%s)') % relpath, 0,
317 unit=_('files'), total=total)
312 unit=_('files'), total=total)
318 for i, name in enumerate(files):
313 for i, name in enumerate(files):
319 flags = self.fileflags(name)
314 flags = self.fileflags(name)
320 mode = 'x' in flags and 0755 or 0644
315 mode = 'x' in flags and 0755 or 0644
321 symlink = 'l' in flags
316 symlink = 'l' in flags
322 archiver.addfile(os.path.join(prefix, self._path, name),
317 archiver.addfile(os.path.join(prefix, self._path, name),
323 mode, symlink, self.filedata(name))
318 mode, symlink, self.filedata(name))
324 ui.progress(_('archiving (%s)') % relpath, i + 1,
319 ui.progress(_('archiving (%s)') % relpath, i + 1,
325 unit=_('files'), total=total)
320 unit=_('files'), total=total)
326 ui.progress(_('archiving (%s)') % relpath, None)
321 ui.progress(_('archiving (%s)') % relpath, None)
327
322
328
323
329 class hgsubrepo(abstractsubrepo):
324 class hgsubrepo(abstractsubrepo):
330 def __init__(self, ctx, path, state):
325 def __init__(self, ctx, path, state):
331 self._path = path
326 self._path = path
332 self._state = state
327 self._state = state
333 r = ctx._repo
328 r = ctx._repo
334 root = r.wjoin(path)
329 root = r.wjoin(path)
335 create = False
330 create = False
336 if not os.path.exists(os.path.join(root, '.hg')):
331 if not os.path.exists(os.path.join(root, '.hg')):
337 create = True
332 create = True
338 util.makedirs(root)
333 util.makedirs(root)
339 self._repo = hg.repository(r.ui, root, create=create)
334 self._repo = hg.repository(r.ui, root, create=create)
340 self._repo._subparent = r
335 self._repo._subparent = r
341 self._repo._subsource = state[0]
336 self._repo._subsource = state[0]
342
337
343 if create:
338 if create:
344 fp = self._repo.opener("hgrc", "w", text=True)
339 fp = self._repo.opener("hgrc", "w", text=True)
345 fp.write('[paths]\n')
340 fp.write('[paths]\n')
346
341
347 def addpathconfig(key, value):
342 def addpathconfig(key, value):
348 if value:
343 if value:
349 fp.write('%s = %s\n' % (key, value))
344 fp.write('%s = %s\n' % (key, value))
350 self._repo.ui.setconfig('paths', key, value)
345 self._repo.ui.setconfig('paths', key, value)
351
346
352 defpath = _abssource(self._repo, abort=False)
347 defpath = _abssource(self._repo, abort=False)
353 defpushpath = _abssource(self._repo, True, abort=False)
348 defpushpath = _abssource(self._repo, True, abort=False)
354 addpathconfig('default', defpath)
349 addpathconfig('default', defpath)
355 if defpath != defpushpath:
350 if defpath != defpushpath:
356 addpathconfig('default-push', defpushpath)
351 addpathconfig('default-push', defpushpath)
357 fp.close()
352 fp.close()
358
353
359 def add(self, ui, match, dryrun, prefix):
354 def add(self, ui, match, dryrun, prefix):
360 return cmdutil.add(ui, self._repo, match, dryrun, True,
355 return cmdutil.add(ui, self._repo, match, dryrun, True,
361 os.path.join(prefix, self._path))
356 os.path.join(prefix, self._path))
362
357
363 def status(self, rev2, **opts):
358 def status(self, rev2, **opts):
364 try:
359 try:
365 rev1 = self._state[1]
360 rev1 = self._state[1]
366 ctx1 = self._repo[rev1]
361 ctx1 = self._repo[rev1]
367 ctx2 = self._repo[rev2]
362 ctx2 = self._repo[rev2]
368 return self._repo.status(ctx1, ctx2, **opts)
363 return self._repo.status(ctx1, ctx2, **opts)
369 except error.RepoLookupError, inst:
364 except error.RepoLookupError, inst:
370 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
365 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
371 % (inst, subrelpath(self)))
366 % (inst, subrelpath(self)))
372 return [], [], [], [], [], [], []
367 return [], [], [], [], [], [], []
373
368
374 def diff(self, diffopts, node2, match, prefix, **opts):
369 def diff(self, diffopts, node2, match, prefix, **opts):
375 try:
370 try:
376 node1 = node.bin(self._state[1])
371 node1 = node.bin(self._state[1])
377 # We currently expect node2 to come from substate and be
372 # We currently expect node2 to come from substate and be
378 # in hex format
373 # in hex format
379 if node2 is not None:
374 if node2 is not None:
380 node2 = node.bin(node2)
375 node2 = node.bin(node2)
381 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
376 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
382 node1, node2, match,
377 node1, node2, match,
383 prefix=os.path.join(prefix, self._path),
378 prefix=os.path.join(prefix, self._path),
384 listsubrepos=True, **opts)
379 listsubrepos=True, **opts)
385 except error.RepoLookupError, inst:
380 except error.RepoLookupError, inst:
386 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
381 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
387 % (inst, subrelpath(self)))
382 % (inst, subrelpath(self)))
388
383
389 def archive(self, ui, archiver, prefix):
384 def archive(self, ui, archiver, prefix):
390 abstractsubrepo.archive(self, ui, archiver, prefix)
385 abstractsubrepo.archive(self, ui, archiver, prefix)
391
386
392 rev = self._state[1]
387 rev = self._state[1]
393 ctx = self._repo[rev]
388 ctx = self._repo[rev]
394 for subpath in ctx.substate:
389 for subpath in ctx.substate:
395 s = subrepo(ctx, subpath)
390 s = subrepo(ctx, subpath)
396 s.archive(ui, archiver, os.path.join(prefix, self._path))
391 s.archive(ui, archiver, os.path.join(prefix, self._path))
397
392
398 def dirty(self):
393 def dirty(self):
399 r = self._state[1]
394 r = self._state[1]
400 if r == '':
395 if r == '':
401 return True
396 return True
402 w = self._repo[None]
397 w = self._repo[None]
403 if w.p1() != self._repo[r]: # version checked out change
398 if w.p1() != self._repo[r]: # version checked out change
404 return True
399 return True
405 return w.dirty() # working directory changed
400 return w.dirty() # working directory changed
406
401
407 def checknested(self, path):
402 def checknested(self, path):
408 return self._repo._checknested(self._repo.wjoin(path))
403 return self._repo._checknested(self._repo.wjoin(path))
409
404
410 def commit(self, text, user, date):
405 def commit(self, text, user, date):
411 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
406 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
412 n = self._repo.commit(text, user, date)
407 n = self._repo.commit(text, user, date)
413 if not n:
408 if not n:
414 return self._repo['.'].hex() # different version checked out
409 return self._repo['.'].hex() # different version checked out
415 return node.hex(n)
410 return node.hex(n)
416
411
417 def remove(self):
412 def remove(self):
418 # we can't fully delete the repository as it may contain
413 # we can't fully delete the repository as it may contain
419 # local-only history
414 # local-only history
420 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
415 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
421 hg.clean(self._repo, node.nullid, False)
416 hg.clean(self._repo, node.nullid, False)
422
417
423 def _get(self, state):
418 def _get(self, state):
424 source, revision, kind = state
419 source, revision, kind = state
425 try:
420 try:
426 self._repo.lookup(revision)
421 self._repo.lookup(revision)
427 except error.RepoError:
422 except error.RepoError:
428 self._repo._subsource = source
423 self._repo._subsource = source
429 srcurl = _abssource(self._repo)
424 srcurl = _abssource(self._repo)
430 self._repo.ui.status(_('pulling subrepo %s from %s\n')
425 self._repo.ui.status(_('pulling subrepo %s from %s\n')
431 % (subrelpath(self), srcurl))
426 % (subrelpath(self), srcurl))
432 other = hg.repository(self._repo.ui, srcurl)
427 other = hg.repository(self._repo.ui, srcurl)
433 self._repo.pull(other)
428 self._repo.pull(other)
434
429
435 def get(self, state):
430 def get(self, state):
436 self._get(state)
431 self._get(state)
437 source, revision, kind = state
432 source, revision, kind = state
438 self._repo.ui.debug("getting subrepo %s\n" % self._path)
433 self._repo.ui.debug("getting subrepo %s\n" % self._path)
439 hg.clean(self._repo, revision, False)
434 hg.clean(self._repo, revision, False)
440
435
441 def merge(self, state):
436 def merge(self, state):
442 self._get(state)
437 self._get(state)
443 cur = self._repo['.']
438 cur = self._repo['.']
444 dst = self._repo[state[1]]
439 dst = self._repo[state[1]]
445 anc = dst.ancestor(cur)
440 anc = dst.ancestor(cur)
446 if anc == cur:
441 if anc == cur:
447 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
442 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
448 hg.update(self._repo, state[1])
443 hg.update(self._repo, state[1])
449 elif anc == dst:
444 elif anc == dst:
450 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
445 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
451 else:
446 else:
452 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
447 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
453 hg.merge(self._repo, state[1], remind=False)
448 hg.merge(self._repo, state[1], remind=False)
454
449
455 def push(self, force):
450 def push(self, force):
456 # push subrepos depth-first for coherent ordering
451 # push subrepos depth-first for coherent ordering
457 c = self._repo['']
452 c = self._repo['']
458 subs = c.substate # only repos that are committed
453 subs = c.substate # only repos that are committed
459 for s in sorted(subs):
454 for s in sorted(subs):
460 if not c.sub(s).push(force):
455 if not c.sub(s).push(force):
461 return False
456 return False
462
457
463 dsturl = _abssource(self._repo, True)
458 dsturl = _abssource(self._repo, True)
464 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
459 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
465 (subrelpath(self), dsturl))
460 (subrelpath(self), dsturl))
466 other = hg.repository(self._repo.ui, dsturl)
461 other = hg.repository(self._repo.ui, dsturl)
467 return self._repo.push(other, force)
462 return self._repo.push(other, force)
468
463
469 def outgoing(self, ui, dest, opts):
464 def outgoing(self, ui, dest, opts):
470 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
465 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
471
466
472 def incoming(self, ui, source, opts):
467 def incoming(self, ui, source, opts):
473 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
468 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
474
469
475 def files(self):
470 def files(self):
476 rev = self._state[1]
471 rev = self._state[1]
477 ctx = self._repo[rev]
472 ctx = self._repo[rev]
478 return ctx.manifest()
473 return ctx.manifest()
479
474
480 def filedata(self, name):
475 def filedata(self, name):
481 rev = self._state[1]
476 rev = self._state[1]
482 return self._repo[rev][name].data()
477 return self._repo[rev][name].data()
483
478
484 def fileflags(self, name):
479 def fileflags(self, name):
485 rev = self._state[1]
480 rev = self._state[1]
486 ctx = self._repo[rev]
481 ctx = self._repo[rev]
487 return ctx.flags(name)
482 return ctx.flags(name)
488
483
489
484
490 class svnsubrepo(abstractsubrepo):
485 class svnsubrepo(abstractsubrepo):
491 def __init__(self, ctx, path, state):
486 def __init__(self, ctx, path, state):
492 self._path = path
487 self._path = path
493 self._state = state
488 self._state = state
494 self._ctx = ctx
489 self._ctx = ctx
495 self._ui = ctx._repo.ui
490 self._ui = ctx._repo.ui
496
491
497 def _svncommand(self, commands, filename=''):
492 def _svncommand(self, commands, filename=''):
498 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
493 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
499 cmd = ['svn'] + commands + [path]
494 cmd = ['svn'] + commands + [path]
500 env = dict(os.environ)
495 env = dict(os.environ)
501 # Avoid localized output, preserve current locale for everything else.
496 # Avoid localized output, preserve current locale for everything else.
502 env['LC_MESSAGES'] = 'C'
497 env['LC_MESSAGES'] = 'C'
503 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
498 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
504 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
499 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
505 universal_newlines=True, env=env)
500 universal_newlines=True, env=env)
506 stdout, stderr = p.communicate()
501 stdout, stderr = p.communicate()
507 stderr = stderr.strip()
502 stderr = stderr.strip()
508 if stderr:
503 if stderr:
509 raise util.Abort(stderr)
504 raise util.Abort(stderr)
510 return stdout
505 return stdout
511
506
512 def _wcrev(self):
507 def _wcrev(self):
513 output = self._svncommand(['info', '--xml'])
508 output = self._svncommand(['info', '--xml'])
514 doc = xml.dom.minidom.parseString(output)
509 doc = xml.dom.minidom.parseString(output)
515 entries = doc.getElementsByTagName('entry')
510 entries = doc.getElementsByTagName('entry')
516 if not entries:
511 if not entries:
517 return '0'
512 return '0'
518 return str(entries[0].getAttribute('revision')) or '0'
513 return str(entries[0].getAttribute('revision')) or '0'
519
514
520 def _wcchanged(self):
515 def _wcchanged(self):
521 """Return (changes, extchanges) where changes is True
516 """Return (changes, extchanges) where changes is True
522 if the working directory was changed, and extchanges is
517 if the working directory was changed, and extchanges is
523 True if any of these changes concern an external entry.
518 True if any of these changes concern an external entry.
524 """
519 """
525 output = self._svncommand(['status', '--xml'])
520 output = self._svncommand(['status', '--xml'])
526 externals, changes = [], []
521 externals, changes = [], []
527 doc = xml.dom.minidom.parseString(output)
522 doc = xml.dom.minidom.parseString(output)
528 for e in doc.getElementsByTagName('entry'):
523 for e in doc.getElementsByTagName('entry'):
529 s = e.getElementsByTagName('wc-status')
524 s = e.getElementsByTagName('wc-status')
530 if not s:
525 if not s:
531 continue
526 continue
532 item = s[0].getAttribute('item')
527 item = s[0].getAttribute('item')
533 props = s[0].getAttribute('props')
528 props = s[0].getAttribute('props')
534 path = e.getAttribute('path')
529 path = e.getAttribute('path')
535 if item == 'external':
530 if item == 'external':
536 externals.append(path)
531 externals.append(path)
537 if (item not in ('', 'normal', 'unversioned', 'external')
532 if (item not in ('', 'normal', 'unversioned', 'external')
538 or props not in ('', 'none')):
533 or props not in ('', 'none')):
539 changes.append(path)
534 changes.append(path)
540 for path in changes:
535 for path in changes:
541 for ext in externals:
536 for ext in externals:
542 if path == ext or path.startswith(ext + os.sep):
537 if path == ext or path.startswith(ext + os.sep):
543 return True, True
538 return True, True
544 return bool(changes), False
539 return bool(changes), False
545
540
546 def dirty(self):
541 def dirty(self):
547 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
542 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
548 return False
543 return False
549 return True
544 return True
550
545
551 def commit(self, text, user, date):
546 def commit(self, text, user, date):
552 # user and date are out of our hands since svn is centralized
547 # user and date are out of our hands since svn is centralized
553 changed, extchanged = self._wcchanged()
548 changed, extchanged = self._wcchanged()
554 if not changed:
549 if not changed:
555 return self._wcrev()
550 return self._wcrev()
556 if extchanged:
551 if extchanged:
557 # Do not try to commit externals
552 # Do not try to commit externals
558 raise util.Abort(_('cannot commit svn externals'))
553 raise util.Abort(_('cannot commit svn externals'))
559 commitinfo = self._svncommand(['commit', '-m', text])
554 commitinfo = self._svncommand(['commit', '-m', text])
560 self._ui.status(commitinfo)
555 self._ui.status(commitinfo)
561 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
556 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
562 if not newrev:
557 if not newrev:
563 raise util.Abort(commitinfo.splitlines()[-1])
558 raise util.Abort(commitinfo.splitlines()[-1])
564 newrev = newrev.groups()[0]
559 newrev = newrev.groups()[0]
565 self._ui.status(self._svncommand(['update', '-r', newrev]))
560 self._ui.status(self._svncommand(['update', '-r', newrev]))
566 return newrev
561 return newrev
567
562
568 def remove(self):
563 def remove(self):
569 if self.dirty():
564 if self.dirty():
570 self._ui.warn(_('not removing repo %s because '
565 self._ui.warn(_('not removing repo %s because '
571 'it has changes.\n' % self._path))
566 'it has changes.\n' % self._path))
572 return
567 return
573 self._ui.note(_('removing subrepo %s\n') % self._path)
568 self._ui.note(_('removing subrepo %s\n') % self._path)
574
569
575 def onerror(function, path, excinfo):
570 def onerror(function, path, excinfo):
576 if function is not os.remove:
571 if function is not os.remove:
577 raise
572 raise
578 # read-only files cannot be unlinked under Windows
573 # read-only files cannot be unlinked under Windows
579 s = os.stat(path)
574 s = os.stat(path)
580 if (s.st_mode & stat.S_IWRITE) != 0:
575 if (s.st_mode & stat.S_IWRITE) != 0:
581 raise
576 raise
582 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
577 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
583 os.remove(path)
578 os.remove(path)
584
579
585 path = self._ctx._repo.wjoin(self._path)
580 path = self._ctx._repo.wjoin(self._path)
586 shutil.rmtree(path, onerror=onerror)
581 shutil.rmtree(path, onerror=onerror)
587 try:
582 try:
588 os.removedirs(os.path.dirname(path))
583 os.removedirs(os.path.dirname(path))
589 except OSError:
584 except OSError:
590 pass
585 pass
591
586
592 def get(self, state):
587 def get(self, state):
593 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
588 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
594 if not re.search('Checked out revision [0-9]+.', status):
589 if not re.search('Checked out revision [0-9]+.', status):
595 raise util.Abort(status.splitlines()[-1])
590 raise util.Abort(status.splitlines()[-1])
596 self._ui.status(status)
591 self._ui.status(status)
597
592
598 def merge(self, state):
593 def merge(self, state):
599 old = int(self._state[1])
594 old = int(self._state[1])
600 new = int(state[1])
595 new = int(state[1])
601 if new > old:
596 if new > old:
602 self.get(state)
597 self.get(state)
603
598
604 def push(self, force):
599 def push(self, force):
605 # push is a no-op for SVN
600 # push is a no-op for SVN
606 return True
601 return True
607
602
608 def files(self):
603 def files(self):
609 output = self._svncommand(['list'])
604 output = self._svncommand(['list'])
610 # This works because svn forbids \n in filenames.
605 # This works because svn forbids \n in filenames.
611 return output.splitlines()
606 return output.splitlines()
612
607
613 def filedata(self, name):
608 def filedata(self, name):
614 return self._svncommand(['cat'], name)
609 return self._svncommand(['cat'], name)
615
610
616
611
617 class gitsubrepo(abstractsubrepo):
612 class gitsubrepo(abstractsubrepo):
618 def __init__(self, ctx, path, state):
613 def __init__(self, ctx, path, state):
619 # TODO add git version check.
614 # TODO add git version check.
620 self._state = state
615 self._state = state
621 self._ctx = ctx
616 self._ctx = ctx
622 self._relpath = path
617 self._relpath = path
623 self._path = ctx._repo.wjoin(path)
618 self._path = ctx._repo.wjoin(path)
624 self._ui = ctx._repo.ui
619 self._ui = ctx._repo.ui
625
620
626 def _gitcommand(self, commands, env=None, stream=False):
621 def _gitcommand(self, commands, env=None, stream=False):
627 return self._gitdir(commands, env=env, stream=stream)[0]
622 return self._gitdir(commands, env=env, stream=stream)[0]
628
623
629 def _gitdir(self, commands, env=None, stream=False):
624 def _gitdir(self, commands, env=None, stream=False):
630 return self._gitnodir(commands, env=env, stream=stream, cwd=self._path)
625 return self._gitnodir(commands, env=env, stream=stream, cwd=self._path)
631
626
632 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
627 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
633 """Calls the git command
628 """Calls the git command
634
629
635 The method tries to call the git command. Versions prior to 1.6.0
630 The method tries to call the git command. Versions prior to 1.6.0
636 are not supported and will very probably fail.
631 are not supported and will very probably fail.
637 """
632 """
638 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
633 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
639 # unless ui.quiet is set, print git's stderr,
634 # unless ui.quiet is set, print git's stderr,
640 # which is mostly progress and useful info
635 # which is mostly progress and useful info
641 errpipe = None
636 errpipe = None
642 if self._ui.quiet:
637 if self._ui.quiet:
643 errpipe = open(os.devnull, 'w')
638 errpipe = open(os.devnull, 'w')
644 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
639 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
645 close_fds=util.closefds,
640 close_fds=util.closefds,
646 stdout=subprocess.PIPE, stderr=errpipe)
641 stdout=subprocess.PIPE, stderr=errpipe)
647 if stream:
642 if stream:
648 return p.stdout, None
643 return p.stdout, None
649
644
650 retdata = p.stdout.read().strip()
645 retdata = p.stdout.read().strip()
651 # wait for the child to exit to avoid race condition.
646 # wait for the child to exit to avoid race condition.
652 p.wait()
647 p.wait()
653
648
654 if p.returncode != 0 and p.returncode != 1:
649 if p.returncode != 0 and p.returncode != 1:
655 # there are certain error codes that are ok
650 # there are certain error codes that are ok
656 command = commands[0]
651 command = commands[0]
657 if command in ('cat-file', 'symbolic-ref'):
652 if command in ('cat-file', 'symbolic-ref'):
658 return retdata, p.returncode
653 return retdata, p.returncode
659 # for all others, abort
654 # for all others, abort
660 raise util.Abort('git %s error %d in %s' %
655 raise util.Abort('git %s error %d in %s' %
661 (command, p.returncode, self._relpath))
656 (command, p.returncode, self._relpath))
662
657
663 return retdata, p.returncode
658 return retdata, p.returncode
664
659
665 def _gitstate(self):
660 def _gitstate(self):
666 return self._gitcommand(['rev-parse', 'HEAD'])
661 return self._gitcommand(['rev-parse', 'HEAD'])
667
662
668 def _gitcurrentbranch(self):
663 def _gitcurrentbranch(self):
669 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
664 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
670 if err:
665 if err:
671 current = None
666 current = None
672 return current
667 return current
673
668
674 def _githavelocally(self, revision):
669 def _githavelocally(self, revision):
675 out, code = self._gitdir(['cat-file', '-e', revision])
670 out, code = self._gitdir(['cat-file', '-e', revision])
676 return code == 0
671 return code == 0
677
672
678 def _gitisancestor(self, r1, r2):
673 def _gitisancestor(self, r1, r2):
679 base = self._gitcommand(['merge-base', r1, r2])
674 base = self._gitcommand(['merge-base', r1, r2])
680 return base == r1
675 return base == r1
681
676
    def _gitbranchmap(self):
        '''returns 3 things:
        a map from git branch to revision
        a map from revision to branches
        a map from remote branch to local tracking branch'''
        branch2rev = {}
        rev2branch = {}
        tracking = {}
        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname) %(upstream) end'])
        for line in out.split('\n'):
            revision, ref, upstream = line.split(' ')[:3]
            if ref.startswith('refs/tags/'):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
            if upstream:
                # assumes no more than one local tracking branch for a remote
                tracking[upstream] = ref
        return branch2rev, rev2branch, tracking
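    # For example, the for-each-ref format above typically yields lines like
    #   1a2b3c... refs/heads/foo refs/remotes/origin/foo end
    #   1a2b3c... refs/remotes/origin/foo  end
    # (hashes illustrative), which the loop turns into
    #   branch2rev['refs/heads/foo'] = '1a2b3c...'
    #   rev2branch['1a2b3c...'] = ['refs/heads/foo', 'refs/remotes/origin/foo']
    #   tracking['refs/remotes/origin/foo'] = 'refs/heads/foo'
    # An empty %(upstream) field leaves the tracking map untouched.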
    def _fetch(self, source, revision):
        if not os.path.exists('%s/.git' % self._path):
            self._ui.status(_('cloning subrepo %s\n') % self._relpath)
            self._gitnodir(['clone', source, self._path])
        if self._githavelocally(revision):
            return
        self._ui.status(_('pulling subrepo %s\n') % self._relpath)
        # first try from origin
        self._gitcommand(['fetch'])
        if self._githavelocally(revision):
            return
        # then try from known subrepo source
        self._gitcommand(['fetch', source])
        if not self._githavelocally(revision):
            raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                               (revision, self._path))
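    # In short: clone on first use, look for the wanted revision after a plain
    # `git fetch` from origin, and only then fall back to fetching directly
    # from the recorded subrepo source before giving up.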
    def dirty(self):
        if self._state[1] != self._gitstate(): # version checked out changed?
            return True
        # check for staged changes or modified files; ignore untracked files
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1
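    # `git diff-index --quiet HEAD` exits 0 for a clean tree and 1 when
    # tracked files have staged or unstaged changes, hence the `code == 1`
    # test above; untracked files do not affect its exit status.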
    def get(self, state):
        source, revision, kind = state
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitcommand(['config', '--bool', 'core.bare']) == 'true':
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            return
        branch2rev, rev2branch, tracking = self._gitbranchmap()

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                          self._relpath)
            self._ui.warn(_('check out a git branch if you intend '
                            'to make changes\n'))
            self._gitcommand(['checkout', '-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                self._gitcommand(['checkout', 'refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            self._gitcommand(['checkout', firstlocalbranch])
            return

        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 2)[2]
            self._gitcommand(['checkout', '-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                self._gitcommand(['checkout', tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
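    # Checkout preference of get(), roughly: refs/heads/master wins, then the
    # first local branch pointing at the target revision, then a remote
    # branch; an untracked remote gets a new local tracking branch, a tracked
    # remote is fast-forwarded into its tracking branch when possible, and
    # anything else falls back to a detached checkout of the bare revision.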
    def commit(self, text, user, date):
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()
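    # The resulting GIT_AUTHOR_DATE looks roughly like
    # '2010-11-15T14:47:36 +0100' (%1%2 are Mercurial's timezone offset
    # escapes in util.datestr), a form git's date parser accepts even for
    # timestamps below 1e9 seconds.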
    def merge(self, state):
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        if base == revision:
            self.get(state) # fast forward merge
        elif base != self._state[1]:
            self._gitcommand(['merge', '--no-commit', revision])
    def push(self, force):
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch, tracking = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self._ui.warn(_('unrelated git branch checked out '
                                'in subrepo %s\n') % self._relpath)
                return False
            self._ui.status(_('pushing branch %s of subrepo %s\n') %
                            (current.split('/', 2)[2], self._relpath))
            self._gitcommand(cmd + ['origin', current])
            return True
        else:
            self._ui.warn(_('no branch checked out in subrepo %s\n'
                            'cannot push revision %s') %
                          (self._relpath, self._state[1]))
            return False
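    # Note: `current` above is a full ref such as 'refs/heads/master', so
    # current.split('/', 2)[2] yields just 'master' for the status message and
    # for the ref passed to `git push origin`.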
    def remove(self):
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self._ui.note(_('removing subrepo %s\n') % self._path)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f in os.listdir(self._path):
            if f == '.git':
                continue
            path = os.path.join(self._path, f)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
    def archive(self, ui, archiver, prefix):
        source, revision = self._state
        self._fetch(source, revision)

        # Parse git's native archive command.
        # This should be much faster than manually traversing the trees
        # and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode='r|')
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
        for i, info in enumerate(tar):
            archiver.addfile(os.path.join(prefix, self._relpath, info.name),
                             info.mode, info.issym(),
                             tar.extractfile(info).read())
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'))
        ui.progress(_('archiving (%s)') % relpath, None)
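    # `git archive <rev>` writes a tar stream to stdout; tarfile's 'r|' mode
    # reads that stream without seeking, so the whole export runs in a single
    # child process instead of one object lookup per file.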


types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
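# The kind recorded for each path in .hgsub ('hg', 'git' or 'svn') selects the
# matching subrepo class from this table.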