##// END OF EJS Templates
merge with stable
Matt Mackall -
r15735:5b384b7f merge default
parent child Browse files
Show More
@@ -1,2219 +1,2220
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 self._dirtyphases = False
40
40
41 try:
41 try:
42 self.ui.readconfig(self.join("hgrc"), self.root)
42 self.ui.readconfig(self.join("hgrc"), self.root)
43 extensions.loadall(self.ui)
43 extensions.loadall(self.ui)
44 except IOError:
44 except IOError:
45 pass
45 pass
46
46
47 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
48 if create:
48 if create:
49 if not os.path.exists(path):
49 if not os.path.exists(path):
50 util.makedirs(path)
50 util.makedirs(path)
51 util.makedir(self.path, notindexed=True)
51 util.makedir(self.path, notindexed=True)
52 requirements = ["revlogv1"]
52 requirements = ["revlogv1"]
53 if self.ui.configbool('format', 'usestore', True):
53 if self.ui.configbool('format', 'usestore', True):
54 os.mkdir(os.path.join(self.path, "store"))
54 os.mkdir(os.path.join(self.path, "store"))
55 requirements.append("store")
55 requirements.append("store")
56 if self.ui.configbool('format', 'usefncache', True):
56 if self.ui.configbool('format', 'usefncache', True):
57 requirements.append("fncache")
57 requirements.append("fncache")
58 if self.ui.configbool('format', 'dotencode', True):
58 if self.ui.configbool('format', 'dotencode', True):
59 requirements.append('dotencode')
59 requirements.append('dotencode')
60 # create an invalid changelog
60 # create an invalid changelog
61 self.opener.append(
61 self.opener.append(
62 "00changelog.i",
62 "00changelog.i",
63 '\0\0\0\2' # represents revlogv2
63 '\0\0\0\2' # represents revlogv2
64 ' dummy changelog to prevent using the old repo layout'
64 ' dummy changelog to prevent using the old repo layout'
65 )
65 )
66 if self.ui.configbool('format', 'generaldelta', False):
66 if self.ui.configbool('format', 'generaldelta', False):
67 requirements.append("generaldelta")
67 requirements.append("generaldelta")
68 requirements = set(requirements)
68 requirements = set(requirements)
69 else:
69 else:
70 raise error.RepoError(_("repository %s not found") % path)
70 raise error.RepoError(_("repository %s not found") % path)
71 elif create:
71 elif create:
72 raise error.RepoError(_("repository %s already exists") % path)
72 raise error.RepoError(_("repository %s already exists") % path)
73 else:
73 else:
74 try:
74 try:
75 requirements = scmutil.readrequires(self.opener, self.supported)
75 requirements = scmutil.readrequires(self.opener, self.supported)
76 except IOError, inst:
76 except IOError, inst:
77 if inst.errno != errno.ENOENT:
77 if inst.errno != errno.ENOENT:
78 raise
78 raise
79 requirements = set()
79 requirements = set()
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101
101
102 self._branchcache = None
102 self._branchcache = None
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 # A cache for various files under .hg/ that tracks file changes,
108 # A cache for various files under .hg/ that tracks file changes,
109 # (used by the filecache decorator)
109 # (used by the filecache decorator)
110 #
110 #
111 # Maps a property name to its util.filecacheentry
111 # Maps a property name to its util.filecacheentry
112 self._filecache = {}
112 self._filecache = {}
113
113
114 def _applyrequirements(self, requirements):
114 def _applyrequirements(self, requirements):
115 self.requirements = requirements
115 self.requirements = requirements
116 openerreqs = set(('revlogv1', 'generaldelta'))
116 openerreqs = set(('revlogv1', 'generaldelta'))
117 self.sopener.options = dict((r, 1) for r in requirements
117 self.sopener.options = dict((r, 1) for r in requirements
118 if r in openerreqs)
118 if r in openerreqs)
119
119
120 def _writerequirements(self):
120 def _writerequirements(self):
121 reqfile = self.opener("requires", "w")
121 reqfile = self.opener("requires", "w")
122 for r in self.requirements:
122 for r in self.requirements:
123 reqfile.write("%s\n" % r)
123 reqfile.write("%s\n" % r)
124 reqfile.close()
124 reqfile.close()
125
125
126 def _checknested(self, path):
126 def _checknested(self, path):
127 """Determine if path is a legal nested repository."""
127 """Determine if path is a legal nested repository."""
128 if not path.startswith(self.root):
128 if not path.startswith(self.root):
129 return False
129 return False
130 subpath = path[len(self.root) + 1:]
130 subpath = path[len(self.root) + 1:]
131 normsubpath = util.pconvert(subpath)
131
132
132 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
133 # the sense that it can reject things like
134 # the sense that it can reject things like
134 #
135 #
135 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
136 #
137 #
137 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
138 # parent revision.
139 # parent revision.
139 #
140 #
140 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
141 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
142 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
143 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
144 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
145 #
146 #
146 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
147 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
148 # the filesystem *now*.
149 # the filesystem *now*.
149 ctx = self[None]
150 ctx = self[None]
150 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
151 while parts:
152 while parts:
152 prefix = os.sep.join(parts)
153 prefix = '/'.join(parts)
153 if prefix in ctx.substate:
154 if prefix in ctx.substate:
154 if prefix == subpath:
155 if prefix == normsubpath:
155 return True
156 return True
156 else:
157 else:
157 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
158 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
159 else:
160 else:
160 parts.pop()
161 parts.pop()
161 return False
162 return False
162
163
163 @filecache('bookmarks')
164 @filecache('bookmarks')
164 def _bookmarks(self):
165 def _bookmarks(self):
165 return bookmarks.read(self)
166 return bookmarks.read(self)
166
167
167 @filecache('bookmarks.current')
168 @filecache('bookmarks.current')
168 def _bookmarkcurrent(self):
169 def _bookmarkcurrent(self):
169 return bookmarks.readcurrent(self)
170 return bookmarks.readcurrent(self)
170
171
171 def _writebookmarks(self, marks):
172 def _writebookmarks(self, marks):
172 bookmarks.write(self)
173 bookmarks.write(self)
173
174
174 @filecache('phaseroots')
175 @filecache('phaseroots')
175 def _phaseroots(self):
176 def _phaseroots(self):
176 self._dirtyphases = False
177 self._dirtyphases = False
177 phaseroots = phases.readroots(self)
178 phaseroots = phases.readroots(self)
178 phases.filterunknown(self, phaseroots)
179 phases.filterunknown(self, phaseroots)
179 return phaseroots
180 return phaseroots
180
181
181 @propertycache
182 @propertycache
182 def _phaserev(self):
183 def _phaserev(self):
183 cache = [0] * len(self)
184 cache = [0] * len(self)
184 for phase in phases.trackedphases:
185 for phase in phases.trackedphases:
185 roots = map(self.changelog.rev, self._phaseroots[phase])
186 roots = map(self.changelog.rev, self._phaseroots[phase])
186 if roots:
187 if roots:
187 for rev in roots:
188 for rev in roots:
188 cache[rev] = phase
189 cache[rev] = phase
189 for rev in self.changelog.descendants(*roots):
190 for rev in self.changelog.descendants(*roots):
190 cache[rev] = phase
191 cache[rev] = phase
191 return cache
192 return cache
192
193
193 @filecache('00changelog.i', True)
194 @filecache('00changelog.i', True)
194 def changelog(self):
195 def changelog(self):
195 c = changelog.changelog(self.sopener)
196 c = changelog.changelog(self.sopener)
196 if 'HG_PENDING' in os.environ:
197 if 'HG_PENDING' in os.environ:
197 p = os.environ['HG_PENDING']
198 p = os.environ['HG_PENDING']
198 if p.startswith(self.root):
199 if p.startswith(self.root):
199 c.readpending('00changelog.i.a')
200 c.readpending('00changelog.i.a')
200 return c
201 return c
201
202
202 @filecache('00manifest.i', True)
203 @filecache('00manifest.i', True)
203 def manifest(self):
204 def manifest(self):
204 return manifest.manifest(self.sopener)
205 return manifest.manifest(self.sopener)
205
206
206 @filecache('dirstate')
207 @filecache('dirstate')
207 def dirstate(self):
208 def dirstate(self):
208 warned = [0]
209 warned = [0]
209 def validate(node):
210 def validate(node):
210 try:
211 try:
211 self.changelog.rev(node)
212 self.changelog.rev(node)
212 return node
213 return node
213 except error.LookupError:
214 except error.LookupError:
214 if not warned[0]:
215 if not warned[0]:
215 warned[0] = True
216 warned[0] = True
216 self.ui.warn(_("warning: ignoring unknown"
217 self.ui.warn(_("warning: ignoring unknown"
217 " working parent %s!\n") % short(node))
218 " working parent %s!\n") % short(node))
218 return nullid
219 return nullid
219
220
220 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221
222
222 def __getitem__(self, changeid):
223 def __getitem__(self, changeid):
223 if changeid is None:
224 if changeid is None:
224 return context.workingctx(self)
225 return context.workingctx(self)
225 return context.changectx(self, changeid)
226 return context.changectx(self, changeid)
226
227
227 def __contains__(self, changeid):
228 def __contains__(self, changeid):
228 try:
229 try:
229 return bool(self.lookup(changeid))
230 return bool(self.lookup(changeid))
230 except error.RepoLookupError:
231 except error.RepoLookupError:
231 return False
232 return False
232
233
233 def __nonzero__(self):
234 def __nonzero__(self):
234 return True
235 return True
235
236
236 def __len__(self):
237 def __len__(self):
237 return len(self.changelog)
238 return len(self.changelog)
238
239
239 def __iter__(self):
240 def __iter__(self):
240 for i in xrange(len(self)):
241 for i in xrange(len(self)):
241 yield i
242 yield i
242
243
243 def revs(self, expr, *args):
244 def revs(self, expr, *args):
244 '''Return a list of revisions matching the given revset'''
245 '''Return a list of revisions matching the given revset'''
245 expr = revset.formatspec(expr, *args)
246 expr = revset.formatspec(expr, *args)
246 m = revset.match(None, expr)
247 m = revset.match(None, expr)
247 return [r for r in m(self, range(len(self)))]
248 return [r for r in m(self, range(len(self)))]
248
249
249 def set(self, expr, *args):
250 def set(self, expr, *args):
250 '''
251 '''
251 Yield a context for each matching revision, after doing arg
252 Yield a context for each matching revision, after doing arg
252 replacement via revset.formatspec
253 replacement via revset.formatspec
253 '''
254 '''
254 for r in self.revs(expr, *args):
255 for r in self.revs(expr, *args):
255 yield self[r]
256 yield self[r]
256
257
257 def url(self):
258 def url(self):
258 return 'file:' + self.root
259 return 'file:' + self.root
259
260
260 def hook(self, name, throw=False, **args):
261 def hook(self, name, throw=False, **args):
261 return hook.hook(self.ui, self, name, throw, **args)
262 return hook.hook(self.ui, self, name, throw, **args)
262
263
263 tag_disallowed = ':\r\n'
264 tag_disallowed = ':\r\n'
264
265
265 def _tag(self, names, node, message, local, user, date, extra={}):
266 def _tag(self, names, node, message, local, user, date, extra={}):
266 if isinstance(names, str):
267 if isinstance(names, str):
267 allchars = names
268 allchars = names
268 names = (names,)
269 names = (names,)
269 else:
270 else:
270 allchars = ''.join(names)
271 allchars = ''.join(names)
271 for c in self.tag_disallowed:
272 for c in self.tag_disallowed:
272 if c in allchars:
273 if c in allchars:
273 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
274
275
275 branches = self.branchmap()
276 branches = self.branchmap()
276 for name in names:
277 for name in names:
277 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 local=local)
279 local=local)
279 if name in branches:
280 if name in branches:
280 self.ui.warn(_("warning: tag %s conflicts with existing"
281 self.ui.warn(_("warning: tag %s conflicts with existing"
281 " branch name\n") % name)
282 " branch name\n") % name)
282
283
283 def writetags(fp, names, munge, prevtags):
284 def writetags(fp, names, munge, prevtags):
284 fp.seek(0, 2)
285 fp.seek(0, 2)
285 if prevtags and prevtags[-1] != '\n':
286 if prevtags and prevtags[-1] != '\n':
286 fp.write('\n')
287 fp.write('\n')
287 for name in names:
288 for name in names:
288 m = munge and munge(name) or name
289 m = munge and munge(name) or name
289 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 old = self.tags().get(name, nullid)
291 old = self.tags().get(name, nullid)
291 fp.write('%s %s\n' % (hex(old), m))
292 fp.write('%s %s\n' % (hex(old), m))
292 fp.write('%s %s\n' % (hex(node), m))
293 fp.write('%s %s\n' % (hex(node), m))
293 fp.close()
294 fp.close()
294
295
295 prevtags = ''
296 prevtags = ''
296 if local:
297 if local:
297 try:
298 try:
298 fp = self.opener('localtags', 'r+')
299 fp = self.opener('localtags', 'r+')
299 except IOError:
300 except IOError:
300 fp = self.opener('localtags', 'a')
301 fp = self.opener('localtags', 'a')
301 else:
302 else:
302 prevtags = fp.read()
303 prevtags = fp.read()
303
304
304 # local tags are stored in the current charset
305 # local tags are stored in the current charset
305 writetags(fp, names, None, prevtags)
306 writetags(fp, names, None, prevtags)
306 for name in names:
307 for name in names:
307 self.hook('tag', node=hex(node), tag=name, local=local)
308 self.hook('tag', node=hex(node), tag=name, local=local)
308 return
309 return
309
310
310 try:
311 try:
311 fp = self.wfile('.hgtags', 'rb+')
312 fp = self.wfile('.hgtags', 'rb+')
312 except IOError, e:
313 except IOError, e:
313 if e.errno != errno.ENOENT:
314 if e.errno != errno.ENOENT:
314 raise
315 raise
315 fp = self.wfile('.hgtags', 'ab')
316 fp = self.wfile('.hgtags', 'ab')
316 else:
317 else:
317 prevtags = fp.read()
318 prevtags = fp.read()
318
319
319 # committed tags are stored in UTF-8
320 # committed tags are stored in UTF-8
320 writetags(fp, names, encoding.fromlocal, prevtags)
321 writetags(fp, names, encoding.fromlocal, prevtags)
321
322
322 fp.close()
323 fp.close()
323
324
324 if '.hgtags' not in self.dirstate:
325 if '.hgtags' not in self.dirstate:
325 self[None].add(['.hgtags'])
326 self[None].add(['.hgtags'])
326
327
327 m = matchmod.exact(self.root, '', ['.hgtags'])
328 m = matchmod.exact(self.root, '', ['.hgtags'])
328 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
329
330
330 for name in names:
331 for name in names:
331 self.hook('tag', node=hex(node), tag=name, local=local)
332 self.hook('tag', node=hex(node), tag=name, local=local)
332
333
333 return tagnode
334 return tagnode
334
335
335 def tag(self, names, node, message, local, user, date):
336 def tag(self, names, node, message, local, user, date):
336 '''tag a revision with one or more symbolic names.
337 '''tag a revision with one or more symbolic names.
337
338
338 names is a list of strings or, when adding a single tag, names may be a
339 names is a list of strings or, when adding a single tag, names may be a
339 string.
340 string.
340
341
341 if local is True, the tags are stored in a per-repository file.
342 if local is True, the tags are stored in a per-repository file.
342 otherwise, they are stored in the .hgtags file, and a new
343 otherwise, they are stored in the .hgtags file, and a new
343 changeset is committed with the change.
344 changeset is committed with the change.
344
345
345 keyword arguments:
346 keyword arguments:
346
347
347 local: whether to store tags in non-version-controlled file
348 local: whether to store tags in non-version-controlled file
348 (default False)
349 (default False)
349
350
350 message: commit message to use if committing
351 message: commit message to use if committing
351
352
352 user: name of user to use if committing
353 user: name of user to use if committing
353
354
354 date: date tuple to use if committing'''
355 date: date tuple to use if committing'''
355
356
356 if not local:
357 if not local:
357 for x in self.status()[:5]:
358 for x in self.status()[:5]:
358 if '.hgtags' in x:
359 if '.hgtags' in x:
359 raise util.Abort(_('working copy of .hgtags is changed '
360 raise util.Abort(_('working copy of .hgtags is changed '
360 '(please commit .hgtags manually)'))
361 '(please commit .hgtags manually)'))
361
362
362 self.tags() # instantiate the cache
363 self.tags() # instantiate the cache
363 self._tag(names, node, message, local, user, date)
364 self._tag(names, node, message, local, user, date)
364
365
365 @propertycache
366 @propertycache
366 def _tagscache(self):
367 def _tagscache(self):
367 '''Returns a tagscache object that contains various tags related caches.'''
368 '''Returns a tagscache object that contains various tags related caches.'''
368
369
369 # This simplifies its cache management by having one decorated
370 # This simplifies its cache management by having one decorated
370 # function (this one) and the rest simply fetch things from it.
371 # function (this one) and the rest simply fetch things from it.
371 class tagscache(object):
372 class tagscache(object):
372 def __init__(self):
373 def __init__(self):
373 # These two define the set of tags for this repository. tags
374 # These two define the set of tags for this repository. tags
374 # maps tag name to node; tagtypes maps tag name to 'global' or
375 # maps tag name to node; tagtypes maps tag name to 'global' or
375 # 'local'. (Global tags are defined by .hgtags across all
376 # 'local'. (Global tags are defined by .hgtags across all
376 # heads, and local tags are defined in .hg/localtags.)
377 # heads, and local tags are defined in .hg/localtags.)
377 # They constitute the in-memory cache of tags.
378 # They constitute the in-memory cache of tags.
378 self.tags = self.tagtypes = None
379 self.tags = self.tagtypes = None
379
380
380 self.nodetagscache = self.tagslist = None
381 self.nodetagscache = self.tagslist = None
381
382
382 cache = tagscache()
383 cache = tagscache()
383 cache.tags, cache.tagtypes = self._findtags()
384 cache.tags, cache.tagtypes = self._findtags()
384
385
385 return cache
386 return cache
386
387
387 def tags(self):
388 def tags(self):
388 '''return a mapping of tag to node'''
389 '''return a mapping of tag to node'''
389 return self._tagscache.tags
390 return self._tagscache.tags
390
391
391 def _findtags(self):
392 def _findtags(self):
392 '''Do the hard work of finding tags. Return a pair of dicts
393 '''Do the hard work of finding tags. Return a pair of dicts
393 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 maps tag name to a string like \'global\' or \'local\'.
395 maps tag name to a string like \'global\' or \'local\'.
395 Subclasses or extensions are free to add their own tags, but
396 Subclasses or extensions are free to add their own tags, but
396 should be aware that the returned dicts will be retained for the
397 should be aware that the returned dicts will be retained for the
397 duration of the localrepo object.'''
398 duration of the localrepo object.'''
398
399
399 # XXX what tagtype should subclasses/extensions use? Currently
400 # XXX what tagtype should subclasses/extensions use? Currently
400 # mq and bookmarks add tags, but do not set the tagtype at all.
401 # mq and bookmarks add tags, but do not set the tagtype at all.
401 # Should each extension invent its own tag type? Should there
402 # Should each extension invent its own tag type? Should there
402 # be one tagtype for all such "virtual" tags? Or is the status
403 # be one tagtype for all such "virtual" tags? Or is the status
403 # quo fine?
404 # quo fine?
404
405
405 alltags = {} # map tag name to (node, hist)
406 alltags = {} # map tag name to (node, hist)
406 tagtypes = {}
407 tagtypes = {}
407
408
408 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410
411
411 # Build the return dicts. Have to re-encode tag names because
412 # Build the return dicts. Have to re-encode tag names because
412 # the tags module always uses UTF-8 (in order not to lose info
413 # the tags module always uses UTF-8 (in order not to lose info
413 # writing to the cache), but the rest of Mercurial wants them in
414 # writing to the cache), but the rest of Mercurial wants them in
414 # local encoding.
415 # local encoding.
415 tags = {}
416 tags = {}
416 for (name, (node, hist)) in alltags.iteritems():
417 for (name, (node, hist)) in alltags.iteritems():
417 if node != nullid:
418 if node != nullid:
418 try:
419 try:
419 # ignore tags to unknown nodes
420 # ignore tags to unknown nodes
420 self.changelog.lookup(node)
421 self.changelog.lookup(node)
421 tags[encoding.tolocal(name)] = node
422 tags[encoding.tolocal(name)] = node
422 except error.LookupError:
423 except error.LookupError:
423 pass
424 pass
424 tags['tip'] = self.changelog.tip()
425 tags['tip'] = self.changelog.tip()
425 tagtypes = dict([(encoding.tolocal(name), value)
426 tagtypes = dict([(encoding.tolocal(name), value)
426 for (name, value) in tagtypes.iteritems()])
427 for (name, value) in tagtypes.iteritems()])
427 return (tags, tagtypes)
428 return (tags, tagtypes)
428
429
429 def tagtype(self, tagname):
430 def tagtype(self, tagname):
430 '''
431 '''
431 return the type of the given tag. result can be:
432 return the type of the given tag. result can be:
432
433
433 'local' : a local tag
434 'local' : a local tag
434 'global' : a global tag
435 'global' : a global tag
435 None : tag does not exist
436 None : tag does not exist
436 '''
437 '''
437
438
438 return self._tagscache.tagtypes.get(tagname)
439 return self._tagscache.tagtypes.get(tagname)
439
440
440 def tagslist(self):
441 def tagslist(self):
441 '''return a list of tags ordered by revision'''
442 '''return a list of tags ordered by revision'''
442 if not self._tagscache.tagslist:
443 if not self._tagscache.tagslist:
443 l = []
444 l = []
444 for t, n in self.tags().iteritems():
445 for t, n in self.tags().iteritems():
445 r = self.changelog.rev(n)
446 r = self.changelog.rev(n)
446 l.append((r, t, n))
447 l.append((r, t, n))
447 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448
449
449 return self._tagscache.tagslist
450 return self._tagscache.tagslist
450
451
451 def nodetags(self, node):
452 def nodetags(self, node):
452 '''return the tags associated with a node'''
453 '''return the tags associated with a node'''
453 if not self._tagscache.nodetagscache:
454 if not self._tagscache.nodetagscache:
454 nodetagscache = {}
455 nodetagscache = {}
455 for t, n in self.tags().iteritems():
456 for t, n in self.tags().iteritems():
456 nodetagscache.setdefault(n, []).append(t)
457 nodetagscache.setdefault(n, []).append(t)
457 for tags in nodetagscache.itervalues():
458 for tags in nodetagscache.itervalues():
458 tags.sort()
459 tags.sort()
459 self._tagscache.nodetagscache = nodetagscache
460 self._tagscache.nodetagscache = nodetagscache
460 return self._tagscache.nodetagscache.get(node, [])
461 return self._tagscache.nodetagscache.get(node, [])
461
462
462 def nodebookmarks(self, node):
463 def nodebookmarks(self, node):
463 marks = []
464 marks = []
464 for bookmark, n in self._bookmarks.iteritems():
465 for bookmark, n in self._bookmarks.iteritems():
465 if n == node:
466 if n == node:
466 marks.append(bookmark)
467 marks.append(bookmark)
467 return sorted(marks)
468 return sorted(marks)
468
469
469 def _branchtags(self, partial, lrev):
470 def _branchtags(self, partial, lrev):
470 # TODO: rename this function?
471 # TODO: rename this function?
471 tiprev = len(self) - 1
472 tiprev = len(self) - 1
472 if lrev != tiprev:
473 if lrev != tiprev:
473 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 self._updatebranchcache(partial, ctxgen)
475 self._updatebranchcache(partial, ctxgen)
475 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476
477
477 return partial
478 return partial
478
479
479 def updatebranchcache(self):
480 def updatebranchcache(self):
480 tip = self.changelog.tip()
481 tip = self.changelog.tip()
481 if self._branchcache is not None and self._branchcachetip == tip:
482 if self._branchcache is not None and self._branchcachetip == tip:
482 return self._branchcache
483 return self._branchcache
483
484
484 oldtip = self._branchcachetip
485 oldtip = self._branchcachetip
485 self._branchcachetip = tip
486 self._branchcachetip = tip
486 if oldtip is None or oldtip not in self.changelog.nodemap:
487 if oldtip is None or oldtip not in self.changelog.nodemap:
487 partial, last, lrev = self._readbranchcache()
488 partial, last, lrev = self._readbranchcache()
488 else:
489 else:
489 lrev = self.changelog.rev(oldtip)
490 lrev = self.changelog.rev(oldtip)
490 partial = self._branchcache
491 partial = self._branchcache
491
492
492 self._branchtags(partial, lrev)
493 self._branchtags(partial, lrev)
493 # this private cache holds all heads (not just tips)
494 # this private cache holds all heads (not just tips)
494 self._branchcache = partial
495 self._branchcache = partial
495
496
496 def branchmap(self):
497 def branchmap(self):
497 '''returns a dictionary {branch: [branchheads]}'''
498 '''returns a dictionary {branch: [branchheads]}'''
498 self.updatebranchcache()
499 self.updatebranchcache()
499 return self._branchcache
500 return self._branchcache
500
501
501 def branchtags(self):
502 def branchtags(self):
502 '''return a dict where branch names map to the tipmost head of
503 '''return a dict where branch names map to the tipmost head of
503 the branch, open heads come before closed'''
504 the branch, open heads come before closed'''
504 bt = {}
505 bt = {}
505 for bn, heads in self.branchmap().iteritems():
506 for bn, heads in self.branchmap().iteritems():
506 tip = heads[-1]
507 tip = heads[-1]
507 for h in reversed(heads):
508 for h in reversed(heads):
508 if 'close' not in self.changelog.read(h)[5]:
509 if 'close' not in self.changelog.read(h)[5]:
509 tip = h
510 tip = h
510 break
511 break
511 bt[bn] = tip
512 bt[bn] = tip
512 return bt
513 return bt
513
514
514 def _readbranchcache(self):
515 def _readbranchcache(self):
515 partial = {}
516 partial = {}
516 try:
517 try:
517 f = self.opener("cache/branchheads")
518 f = self.opener("cache/branchheads")
518 lines = f.read().split('\n')
519 lines = f.read().split('\n')
519 f.close()
520 f.close()
520 except (IOError, OSError):
521 except (IOError, OSError):
521 return {}, nullid, nullrev
522 return {}, nullid, nullrev
522
523
523 try:
524 try:
524 last, lrev = lines.pop(0).split(" ", 1)
525 last, lrev = lines.pop(0).split(" ", 1)
525 last, lrev = bin(last), int(lrev)
526 last, lrev = bin(last), int(lrev)
526 if lrev >= len(self) or self[lrev].node() != last:
527 if lrev >= len(self) or self[lrev].node() != last:
527 # invalidate the cache
528 # invalidate the cache
528 raise ValueError('invalidating branch cache (tip differs)')
529 raise ValueError('invalidating branch cache (tip differs)')
529 for l in lines:
530 for l in lines:
530 if not l:
531 if not l:
531 continue
532 continue
532 node, label = l.split(" ", 1)
533 node, label = l.split(" ", 1)
533 label = encoding.tolocal(label.strip())
534 label = encoding.tolocal(label.strip())
534 partial.setdefault(label, []).append(bin(node))
535 partial.setdefault(label, []).append(bin(node))
535 except KeyboardInterrupt:
536 except KeyboardInterrupt:
536 raise
537 raise
537 except Exception, inst:
538 except Exception, inst:
538 if self.ui.debugflag:
539 if self.ui.debugflag:
539 self.ui.warn(str(inst), '\n')
540 self.ui.warn(str(inst), '\n')
540 partial, last, lrev = {}, nullid, nullrev
541 partial, last, lrev = {}, nullid, nullrev
541 return partial, last, lrev
542 return partial, last, lrev
542
543
543 def _writebranchcache(self, branches, tip, tiprev):
544 def _writebranchcache(self, branches, tip, tiprev):
544 try:
545 try:
545 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 f.write("%s %s\n" % (hex(tip), tiprev))
547 f.write("%s %s\n" % (hex(tip), tiprev))
547 for label, nodes in branches.iteritems():
548 for label, nodes in branches.iteritems():
548 for node in nodes:
549 for node in nodes:
549 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 f.close()
551 f.close()
551 except (IOError, OSError):
552 except (IOError, OSError):
552 pass
553 pass
553
554
554 def _updatebranchcache(self, partial, ctxgen):
555 def _updatebranchcache(self, partial, ctxgen):
555 # collect new branch entries
556 # collect new branch entries
556 newbranches = {}
557 newbranches = {}
557 for c in ctxgen:
558 for c in ctxgen:
558 newbranches.setdefault(c.branch(), []).append(c.node())
559 newbranches.setdefault(c.branch(), []).append(c.node())
559 # if older branchheads are reachable from new ones, they aren't
560 # if older branchheads are reachable from new ones, they aren't
560 # really branchheads. Note checking parents is insufficient:
561 # really branchheads. Note checking parents is insufficient:
561 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 for branch, newnodes in newbranches.iteritems():
563 for branch, newnodes in newbranches.iteritems():
563 bheads = partial.setdefault(branch, [])
564 bheads = partial.setdefault(branch, [])
564 bheads.extend(newnodes)
565 bheads.extend(newnodes)
565 if len(bheads) <= 1:
566 if len(bheads) <= 1:
566 continue
567 continue
567 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 # starting from tip means fewer passes over reachable
569 # starting from tip means fewer passes over reachable
569 while newnodes:
570 while newnodes:
570 latest = newnodes.pop()
571 latest = newnodes.pop()
571 if latest not in bheads:
572 if latest not in bheads:
572 continue
573 continue
573 minbhrev = self[bheads[0]].node()
574 minbhrev = self[bheads[0]].node()
574 reachable = self.changelog.reachable(latest, minbhrev)
575 reachable = self.changelog.reachable(latest, minbhrev)
575 reachable.remove(latest)
576 reachable.remove(latest)
576 if reachable:
577 if reachable:
577 bheads = [b for b in bheads if b not in reachable]
578 bheads = [b for b in bheads if b not in reachable]
578 partial[branch] = bheads
579 partial[branch] = bheads
579
580
580 def lookup(self, key):
581 def lookup(self, key):
581 if isinstance(key, int):
582 if isinstance(key, int):
582 return self.changelog.node(key)
583 return self.changelog.node(key)
583 elif key == '.':
584 elif key == '.':
584 return self.dirstate.p1()
585 return self.dirstate.p1()
585 elif key == 'null':
586 elif key == 'null':
586 return nullid
587 return nullid
587 elif key == 'tip':
588 elif key == 'tip':
588 return self.changelog.tip()
589 return self.changelog.tip()
589 n = self.changelog._match(key)
590 n = self.changelog._match(key)
590 if n:
591 if n:
591 return n
592 return n
592 if key in self._bookmarks:
593 if key in self._bookmarks:
593 return self._bookmarks[key]
594 return self._bookmarks[key]
594 if key in self.tags():
595 if key in self.tags():
595 return self.tags()[key]
596 return self.tags()[key]
596 if key in self.branchtags():
597 if key in self.branchtags():
597 return self.branchtags()[key]
598 return self.branchtags()[key]
598 n = self.changelog._partialmatch(key)
599 n = self.changelog._partialmatch(key)
599 if n:
600 if n:
600 return n
601 return n
601
602
602 # can't find key, check if it might have come from damaged dirstate
603 # can't find key, check if it might have come from damaged dirstate
603 if key in self.dirstate.parents():
604 if key in self.dirstate.parents():
604 raise error.Abort(_("working directory has unknown parent '%s'!")
605 raise error.Abort(_("working directory has unknown parent '%s'!")
605 % short(key))
606 % short(key))
606 try:
607 try:
607 if len(key) == 20:
608 if len(key) == 20:
608 key = hex(key)
609 key = hex(key)
609 except TypeError:
610 except TypeError:
610 pass
611 pass
611 raise error.RepoLookupError(_("unknown revision '%s'") % key)
612 raise error.RepoLookupError(_("unknown revision '%s'") % key)
612
613
613 def lookupbranch(self, key, remote=None):
614 def lookupbranch(self, key, remote=None):
614 repo = remote or self
615 repo = remote or self
615 if key in repo.branchmap():
616 if key in repo.branchmap():
616 return key
617 return key
617
618
618 repo = (remote and remote.local()) and remote or self
619 repo = (remote and remote.local()) and remote or self
619 return repo[key].branch()
620 return repo[key].branch()
620
621
621 def known(self, nodes):
622 def known(self, nodes):
622 nm = self.changelog.nodemap
623 nm = self.changelog.nodemap
623 return [(n in nm) for n in nodes]
624 return [(n in nm) for n in nodes]
624
625
625 def local(self):
626 def local(self):
626 return self
627 return self
627
628
628 def join(self, f):
629 def join(self, f):
629 return os.path.join(self.path, f)
630 return os.path.join(self.path, f)
630
631
631 def wjoin(self, f):
632 def wjoin(self, f):
632 return os.path.join(self.root, f)
633 return os.path.join(self.root, f)
633
634
634 def file(self, f):
635 def file(self, f):
635 if f[0] == '/':
636 if f[0] == '/':
636 f = f[1:]
637 f = f[1:]
637 return filelog.filelog(self.sopener, f)
638 return filelog.filelog(self.sopener, f)
638
639
639 def changectx(self, changeid):
640 def changectx(self, changeid):
640 return self[changeid]
641 return self[changeid]
641
642
642 def parents(self, changeid=None):
643 def parents(self, changeid=None):
643 '''get list of changectxs for parents of changeid'''
644 '''get list of changectxs for parents of changeid'''
644 return self[changeid].parents()
645 return self[changeid].parents()
645
646
646 def filectx(self, path, changeid=None, fileid=None):
647 def filectx(self, path, changeid=None, fileid=None):
647 """changeid can be a changeset revision, node, or tag.
648 """changeid can be a changeset revision, node, or tag.
648 fileid can be a file revision or node."""
649 fileid can be a file revision or node."""
649 return context.filectx(self, path, changeid, fileid)
650 return context.filectx(self, path, changeid, fileid)
650
651
651 def getcwd(self):
652 def getcwd(self):
652 return self.dirstate.getcwd()
653 return self.dirstate.getcwd()
653
654
654 def pathto(self, f, cwd=None):
655 def pathto(self, f, cwd=None):
655 return self.dirstate.pathto(f, cwd)
656 return self.dirstate.pathto(f, cwd)
656
657
657 def wfile(self, f, mode='r'):
658 def wfile(self, f, mode='r'):
658 return self.wopener(f, mode)
659 return self.wopener(f, mode)
659
660
660 def _link(self, f):
661 def _link(self, f):
661 return os.path.islink(self.wjoin(f))
662 return os.path.islink(self.wjoin(f))
662
663
663 def _loadfilter(self, filter):
664 def _loadfilter(self, filter):
664 if filter not in self.filterpats:
665 if filter not in self.filterpats:
665 l = []
666 l = []
666 for pat, cmd in self.ui.configitems(filter):
667 for pat, cmd in self.ui.configitems(filter):
667 if cmd == '!':
668 if cmd == '!':
668 continue
669 continue
669 mf = matchmod.match(self.root, '', [pat])
670 mf = matchmod.match(self.root, '', [pat])
670 fn = None
671 fn = None
671 params = cmd
672 params = cmd
672 for name, filterfn in self._datafilters.iteritems():
673 for name, filterfn in self._datafilters.iteritems():
673 if cmd.startswith(name):
674 if cmd.startswith(name):
674 fn = filterfn
675 fn = filterfn
675 params = cmd[len(name):].lstrip()
676 params = cmd[len(name):].lstrip()
676 break
677 break
677 if not fn:
678 if not fn:
678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 fn = lambda s, c, **kwargs: util.filter(s, c)
679 # Wrap old filters not supporting keyword arguments
680 # Wrap old filters not supporting keyword arguments
680 if not inspect.getargspec(fn)[2]:
681 if not inspect.getargspec(fn)[2]:
681 oldfn = fn
682 oldfn = fn
682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 fn = lambda s, c, **kwargs: oldfn(s, c)
683 l.append((mf, fn, params))
684 l.append((mf, fn, params))
684 self.filterpats[filter] = l
685 self.filterpats[filter] = l
685 return self.filterpats[filter]
686 return self.filterpats[filter]
686
687
687 def _filter(self, filterpats, filename, data):
688 def _filter(self, filterpats, filename, data):
688 for mf, fn, cmd in filterpats:
689 for mf, fn, cmd in filterpats:
689 if mf(filename):
690 if mf(filename):
690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 break
693 break
693
694
694 return data
695 return data
695
696
696 @propertycache
697 @propertycache
697 def _encodefilterpats(self):
698 def _encodefilterpats(self):
698 return self._loadfilter('encode')
699 return self._loadfilter('encode')
699
700
700 @propertycache
701 @propertycache
701 def _decodefilterpats(self):
702 def _decodefilterpats(self):
702 return self._loadfilter('decode')
703 return self._loadfilter('decode')
703
704
704 def adddatafilter(self, name, filter):
705 def adddatafilter(self, name, filter):
705 self._datafilters[name] = filter
706 self._datafilters[name] = filter
706
707
707 def wread(self, filename):
708 def wread(self, filename):
708 if self._link(filename):
709 if self._link(filename):
709 data = os.readlink(self.wjoin(filename))
710 data = os.readlink(self.wjoin(filename))
710 else:
711 else:
711 data = self.wopener.read(filename)
712 data = self.wopener.read(filename)
712 return self._filter(self._encodefilterpats, filename, data)
713 return self._filter(self._encodefilterpats, filename, data)
713
714
714 def wwrite(self, filename, data, flags):
715 def wwrite(self, filename, data, flags):
715 data = self._filter(self._decodefilterpats, filename, data)
716 data = self._filter(self._decodefilterpats, filename, data)
716 if 'l' in flags:
717 if 'l' in flags:
717 self.wopener.symlink(data, filename)
718 self.wopener.symlink(data, filename)
718 else:
719 else:
719 self.wopener.write(filename, data)
720 self.wopener.write(filename, data)
720 if 'x' in flags:
721 if 'x' in flags:
721 util.setflags(self.wjoin(filename), False, True)
722 util.setflags(self.wjoin(filename), False, True)
722
723
723 def wwritedata(self, filename, data):
724 def wwritedata(self, filename, data):
724 return self._filter(self._decodefilterpats, filename, data)
725 return self._filter(self._decodefilterpats, filename, data)
725
726
726 def transaction(self, desc):
727 def transaction(self, desc):
727 tr = self._transref and self._transref() or None
728 tr = self._transref and self._transref() or None
728 if tr and tr.running():
729 if tr and tr.running():
729 return tr.nest()
730 return tr.nest()
730
731
731 # abort here if the journal already exists
732 # abort here if the journal already exists
732 if os.path.exists(self.sjoin("journal")):
733 if os.path.exists(self.sjoin("journal")):
733 raise error.RepoError(
734 raise error.RepoError(
734 _("abandoned transaction found - run hg recover"))
735 _("abandoned transaction found - run hg recover"))
735
736
736 journalfiles = self._writejournal(desc)
737 journalfiles = self._writejournal(desc)
737 renames = [(x, undoname(x)) for x in journalfiles]
738 renames = [(x, undoname(x)) for x in journalfiles]
738
739
739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 tr = transaction.transaction(self.ui.warn, self.sopener,
740 self.sjoin("journal"),
741 self.sjoin("journal"),
741 aftertrans(renames),
742 aftertrans(renames),
742 self.store.createmode)
743 self.store.createmode)
743 self._transref = weakref.ref(tr)
744 self._transref = weakref.ref(tr)
744 return tr
745 return tr
745
746
746 def _writejournal(self, desc):
747 def _writejournal(self, desc):
747 # save dirstate for rollback
748 # save dirstate for rollback
748 try:
749 try:
749 ds = self.opener.read("dirstate")
750 ds = self.opener.read("dirstate")
750 except IOError:
751 except IOError:
751 ds = ""
752 ds = ""
752 self.opener.write("journal.dirstate", ds)
753 self.opener.write("journal.dirstate", ds)
753 self.opener.write("journal.branch",
754 self.opener.write("journal.branch",
754 encoding.fromlocal(self.dirstate.branch()))
755 encoding.fromlocal(self.dirstate.branch()))
755 self.opener.write("journal.desc",
756 self.opener.write("journal.desc",
756 "%d\n%s\n" % (len(self), desc))
757 "%d\n%s\n" % (len(self), desc))
757
758
758 bkname = self.join('bookmarks')
759 bkname = self.join('bookmarks')
759 if os.path.exists(bkname):
760 if os.path.exists(bkname):
760 util.copyfile(bkname, self.join('journal.bookmarks'))
761 util.copyfile(bkname, self.join('journal.bookmarks'))
761 else:
762 else:
762 self.opener.write('journal.bookmarks', '')
763 self.opener.write('journal.bookmarks', '')
763 phasesname = self.sjoin('phaseroots')
764 phasesname = self.sjoin('phaseroots')
764 if os.path.exists(phasesname):
765 if os.path.exists(phasesname):
765 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 else:
767 else:
767 self.sopener.write('journal.phaseroots', '')
768 self.sopener.write('journal.phaseroots', '')
768
769
769 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 self.join('journal.branch'), self.join('journal.desc'),
771 self.join('journal.branch'), self.join('journal.desc'),
771 self.join('journal.bookmarks'),
772 self.join('journal.bookmarks'),
772 self.sjoin('journal.phaseroots'))
773 self.sjoin('journal.phaseroots'))
773
774
774 def recover(self):
775 def recover(self):
775 lock = self.lock()
776 lock = self.lock()
776 try:
777 try:
777 if os.path.exists(self.sjoin("journal")):
778 if os.path.exists(self.sjoin("journal")):
778 self.ui.status(_("rolling back interrupted transaction\n"))
779 self.ui.status(_("rolling back interrupted transaction\n"))
779 transaction.rollback(self.sopener, self.sjoin("journal"),
780 transaction.rollback(self.sopener, self.sjoin("journal"),
780 self.ui.warn)
781 self.ui.warn)
781 self.invalidate()
782 self.invalidate()
782 return True
783 return True
783 else:
784 else:
784 self.ui.warn(_("no interrupted transaction available\n"))
785 self.ui.warn(_("no interrupted transaction available\n"))
785 return False
786 return False
786 finally:
787 finally:
787 lock.release()
788 lock.release()
788
789
789 def rollback(self, dryrun=False, force=False):
790 def rollback(self, dryrun=False, force=False):
790 wlock = lock = None
791 wlock = lock = None
791 try:
792 try:
792 wlock = self.wlock()
793 wlock = self.wlock()
793 lock = self.lock()
794 lock = self.lock()
794 if os.path.exists(self.sjoin("undo")):
795 if os.path.exists(self.sjoin("undo")):
795 return self._rollback(dryrun, force)
796 return self._rollback(dryrun, force)
796 else:
797 else:
797 self.ui.warn(_("no rollback information available\n"))
798 self.ui.warn(_("no rollback information available\n"))
798 return 1
799 return 1
799 finally:
800 finally:
800 release(lock, wlock)
801 release(lock, wlock)
801
802
802 def _rollback(self, dryrun, force):
803 def _rollback(self, dryrun, force):
803 ui = self.ui
804 ui = self.ui
804 try:
805 try:
805 args = self.opener.read('undo.desc').splitlines()
806 args = self.opener.read('undo.desc').splitlines()
806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 if len(args) >= 3:
808 if len(args) >= 3:
808 detail = args[2]
809 detail = args[2]
809 oldtip = oldlen - 1
810 oldtip = oldlen - 1
810
811
811 if detail and ui.verbose:
812 if detail and ui.verbose:
812 msg = (_('repository tip rolled back to revision %s'
813 msg = (_('repository tip rolled back to revision %s'
813 ' (undo %s: %s)\n')
814 ' (undo %s: %s)\n')
814 % (oldtip, desc, detail))
815 % (oldtip, desc, detail))
815 else:
816 else:
816 msg = (_('repository tip rolled back to revision %s'
817 msg = (_('repository tip rolled back to revision %s'
817 ' (undo %s)\n')
818 ' (undo %s)\n')
818 % (oldtip, desc))
819 % (oldtip, desc))
819 except IOError:
820 except IOError:
820 msg = _('rolling back unknown transaction\n')
821 msg = _('rolling back unknown transaction\n')
821 desc = None
822 desc = None
822
823
823 if not force and self['.'] != self['tip'] and desc == 'commit':
824 if not force and self['.'] != self['tip'] and desc == 'commit':
824 raise util.Abort(
825 raise util.Abort(
825 _('rollback of last commit while not checked out '
826 _('rollback of last commit while not checked out '
826 'may lose data'), hint=_('use -f to force'))
827 'may lose data'), hint=_('use -f to force'))
827
828
828 ui.status(msg)
829 ui.status(msg)
829 if dryrun:
830 if dryrun:
830 return 0
831 return 0
831
832
832 parents = self.dirstate.parents()
833 parents = self.dirstate.parents()
833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 if os.path.exists(self.join('undo.bookmarks')):
835 if os.path.exists(self.join('undo.bookmarks')):
835 util.rename(self.join('undo.bookmarks'),
836 util.rename(self.join('undo.bookmarks'),
836 self.join('bookmarks'))
837 self.join('bookmarks'))
837 if os.path.exists(self.sjoin('undo.phaseroots')):
838 if os.path.exists(self.sjoin('undo.phaseroots')):
838 util.rename(self.sjoin('undo.phaseroots'),
839 util.rename(self.sjoin('undo.phaseroots'),
839 self.sjoin('phaseroots'))
840 self.sjoin('phaseroots'))
840 self.invalidate()
841 self.invalidate()
841
842
842 parentgone = (parents[0] not in self.changelog.nodemap or
843 parentgone = (parents[0] not in self.changelog.nodemap or
843 parents[1] not in self.changelog.nodemap)
844 parents[1] not in self.changelog.nodemap)
844 if parentgone:
845 if parentgone:
845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 try:
847 try:
847 branch = self.opener.read('undo.branch')
848 branch = self.opener.read('undo.branch')
848 self.dirstate.setbranch(branch)
849 self.dirstate.setbranch(branch)
849 except IOError:
850 except IOError:
850 ui.warn(_('named branch could not be reset: '
851 ui.warn(_('named branch could not be reset: '
851 'current branch is still \'%s\'\n')
852 'current branch is still \'%s\'\n')
852 % self.dirstate.branch())
853 % self.dirstate.branch())
853
854
854 self.dirstate.invalidate()
855 self.dirstate.invalidate()
855 parents = tuple([p.rev() for p in self.parents()])
856 parents = tuple([p.rev() for p in self.parents()])
856 if len(parents) > 1:
857 if len(parents) > 1:
857 ui.status(_('working directory now based on '
858 ui.status(_('working directory now based on '
858 'revisions %d and %d\n') % parents)
859 'revisions %d and %d\n') % parents)
859 else:
860 else:
860 ui.status(_('working directory now based on '
861 ui.status(_('working directory now based on '
861 'revision %d\n') % parents)
862 'revision %d\n') % parents)
862 self.destroyed()
863 self.destroyed()
863 return 0
864 return 0
864
865
865 def invalidatecaches(self):
866 def invalidatecaches(self):
866 try:
867 try:
867 delattr(self, '_tagscache')
868 delattr(self, '_tagscache')
868 except AttributeError:
869 except AttributeError:
869 pass
870 pass
870
871
871 self._branchcache = None # in UTF-8
872 self._branchcache = None # in UTF-8
872 self._branchcachetip = None
873 self._branchcachetip = None
873
874
874 def invalidatedirstate(self):
875 def invalidatedirstate(self):
875 '''Invalidates the dirstate, causing the next call to dirstate
876 '''Invalidates the dirstate, causing the next call to dirstate
876 to check if it was modified since the last time it was read,
877 to check if it was modified since the last time it was read,
877 rereading it if it has.
878 rereading it if it has.
878
879
879 This is different to dirstate.invalidate() that it doesn't always
880 This is different to dirstate.invalidate() that it doesn't always
880 rereads the dirstate. Use dirstate.invalidate() if you want to
881 rereads the dirstate. Use dirstate.invalidate() if you want to
881 explicitly read the dirstate again (i.e. restoring it to a previous
882 explicitly read the dirstate again (i.e. restoring it to a previous
882 known good state).'''
883 known good state).'''
883 try:
884 try:
884 delattr(self, 'dirstate')
885 delattr(self, 'dirstate')
885 except AttributeError:
886 except AttributeError:
886 pass
887 pass
887
888
888 def invalidate(self):
889 def invalidate(self):
889 for k in self._filecache:
890 for k in self._filecache:
890 # dirstate is invalidated separately in invalidatedirstate()
891 # dirstate is invalidated separately in invalidatedirstate()
891 if k == 'dirstate':
892 if k == 'dirstate':
892 continue
893 continue
893
894
894 try:
895 try:
895 delattr(self, k)
896 delattr(self, k)
896 except AttributeError:
897 except AttributeError:
897 pass
898 pass
898 self.invalidatecaches()
899 self.invalidatecaches()
899
900
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 try:
902 try:
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 except error.LockHeld, inst:
904 except error.LockHeld, inst:
904 if not wait:
905 if not wait:
905 raise
906 raise
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 (desc, inst.locker))
908 (desc, inst.locker))
908 # default to 600 seconds timeout
909 # default to 600 seconds timeout
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 releasefn, desc=desc)
911 releasefn, desc=desc)
911 if acquirefn:
912 if acquirefn:
912 acquirefn()
913 acquirefn()
913 return l
914 return l
914
915
915 def _afterlock(self, callback):
916 def _afterlock(self, callback):
916 """add a callback to the current repository lock.
917 """add a callback to the current repository lock.
917
918
918 The callback will be executed on lock release."""
919 The callback will be executed on lock release."""
919 l = self._lockref and self._lockref()
920 l = self._lockref and self._lockref()
920 if l:
921 if l:
921 l.postrelease.append(callback)
922 l.postrelease.append(callback)
922
923
923 def lock(self, wait=True):
924 def lock(self, wait=True):
924 '''Lock the repository store (.hg/store) and return a weak reference
925 '''Lock the repository store (.hg/store) and return a weak reference
925 to the lock. Use this before modifying the store (e.g. committing or
926 to the lock. Use this before modifying the store (e.g. committing or
926 stripping). If you are opening a transaction, get a lock as well.)'''
927 stripping). If you are opening a transaction, get a lock as well.)'''
927 l = self._lockref and self._lockref()
928 l = self._lockref and self._lockref()
928 if l is not None and l.held:
929 if l is not None and l.held:
929 l.lock()
930 l.lock()
930 return l
931 return l
931
932
932 def unlock():
933 def unlock():
933 self.store.write()
934 self.store.write()
934 if self._dirtyphases:
935 if self._dirtyphases:
935 phases.writeroots(self)
936 phases.writeroots(self)
936 for k, ce in self._filecache.items():
937 for k, ce in self._filecache.items():
937 if k == 'dirstate':
938 if k == 'dirstate':
938 continue
939 continue
939 ce.refresh()
940 ce.refresh()
940
941
941 l = self._lock(self.sjoin("lock"), wait, unlock,
942 l = self._lock(self.sjoin("lock"), wait, unlock,
942 self.invalidate, _('repository %s') % self.origroot)
943 self.invalidate, _('repository %s') % self.origroot)
943 self._lockref = weakref.ref(l)
944 self._lockref = weakref.ref(l)
944 return l
945 return l
945
946
946 def wlock(self, wait=True):
947 def wlock(self, wait=True):
947 '''Lock the non-store parts of the repository (everything under
948 '''Lock the non-store parts of the repository (everything under
948 .hg except .hg/store) and return a weak reference to the lock.
949 .hg except .hg/store) and return a weak reference to the lock.
949 Use this before modifying files in .hg.'''
950 Use this before modifying files in .hg.'''
950 l = self._wlockref and self._wlockref()
951 l = self._wlockref and self._wlockref()
951 if l is not None and l.held:
952 if l is not None and l.held:
952 l.lock()
953 l.lock()
953 return l
954 return l
954
955
955 def unlock():
956 def unlock():
956 self.dirstate.write()
957 self.dirstate.write()
957 ce = self._filecache.get('dirstate')
958 ce = self._filecache.get('dirstate')
958 if ce:
959 if ce:
959 ce.refresh()
960 ce.refresh()
960
961
961 l = self._lock(self.join("wlock"), wait, unlock,
962 l = self._lock(self.join("wlock"), wait, unlock,
962 self.invalidatedirstate, _('working directory of %s') %
963 self.invalidatedirstate, _('working directory of %s') %
963 self.origroot)
964 self.origroot)
964 self._wlockref = weakref.ref(l)
965 self._wlockref = weakref.ref(l)
965 return l
966 return l
966
967
967 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
968 """
969 """
969 commit an individual file as part of a larger transaction
970 commit an individual file as part of a larger transaction
970 """
971 """
971
972
972 fname = fctx.path()
973 fname = fctx.path()
973 text = fctx.data()
974 text = fctx.data()
974 flog = self.file(fname)
975 flog = self.file(fname)
975 fparent1 = manifest1.get(fname, nullid)
976 fparent1 = manifest1.get(fname, nullid)
976 fparent2 = fparent2o = manifest2.get(fname, nullid)
977 fparent2 = fparent2o = manifest2.get(fname, nullid)
977
978
978 meta = {}
979 meta = {}
979 copy = fctx.renamed()
980 copy = fctx.renamed()
980 if copy and copy[0] != fname:
981 if copy and copy[0] != fname:
981 # Mark the new revision of this file as a copy of another
982 # Mark the new revision of this file as a copy of another
982 # file. This copy data will effectively act as a parent
983 # file. This copy data will effectively act as a parent
983 # of this new revision. If this is a merge, the first
984 # of this new revision. If this is a merge, the first
984 # parent will be the nullid (meaning "look up the copy data")
985 # parent will be the nullid (meaning "look up the copy data")
985 # and the second one will be the other parent. For example:
986 # and the second one will be the other parent. For example:
986 #
987 #
987 # 0 --- 1 --- 3 rev1 changes file foo
988 # 0 --- 1 --- 3 rev1 changes file foo
988 # \ / rev2 renames foo to bar and changes it
989 # \ / rev2 renames foo to bar and changes it
989 # \- 2 -/ rev3 should have bar with all changes and
990 # \- 2 -/ rev3 should have bar with all changes and
990 # should record that bar descends from
991 # should record that bar descends from
991 # bar in rev2 and foo in rev1
992 # bar in rev2 and foo in rev1
992 #
993 #
993 # this allows this merge to succeed:
994 # this allows this merge to succeed:
994 #
995 #
995 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
996 # \ / merging rev3 and rev4 should use bar@rev2
997 # \ / merging rev3 and rev4 should use bar@rev2
997 # \- 2 --- 4 as the merge base
998 # \- 2 --- 4 as the merge base
998 #
999 #
999
1000
1000 cfname = copy[0]
1001 cfname = copy[0]
1001 crev = manifest1.get(cfname)
1002 crev = manifest1.get(cfname)
1002 newfparent = fparent2
1003 newfparent = fparent2
1003
1004
1004 if manifest2: # branch merge
1005 if manifest2: # branch merge
1005 if fparent2 == nullid or crev is None: # copied on remote side
1006 if fparent2 == nullid or crev is None: # copied on remote side
1006 if cfname in manifest2:
1007 if cfname in manifest2:
1007 crev = manifest2[cfname]
1008 crev = manifest2[cfname]
1008 newfparent = fparent1
1009 newfparent = fparent1
1009
1010
1010 # find source in nearest ancestor if we've lost track
1011 # find source in nearest ancestor if we've lost track
1011 if not crev:
1012 if not crev:
1012 self.ui.debug(" %s: searching for copy revision for %s\n" %
1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1013 (fname, cfname))
1014 (fname, cfname))
1014 for ancestor in self[None].ancestors():
1015 for ancestor in self[None].ancestors():
1015 if cfname in ancestor:
1016 if cfname in ancestor:
1016 crev = ancestor[cfname].filenode()
1017 crev = ancestor[cfname].filenode()
1017 break
1018 break
1018
1019
1019 if crev:
1020 if crev:
1020 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1021 meta["copy"] = cfname
1022 meta["copy"] = cfname
1022 meta["copyrev"] = hex(crev)
1023 meta["copyrev"] = hex(crev)
1023 fparent1, fparent2 = nullid, newfparent
1024 fparent1, fparent2 = nullid, newfparent
1024 else:
1025 else:
1025 self.ui.warn(_("warning: can't find ancestor for '%s' "
1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1026 "copied from '%s'!\n") % (fname, cfname))
1027 "copied from '%s'!\n") % (fname, cfname))
1027
1028
1028 elif fparent2 != nullid:
1029 elif fparent2 != nullid:
1029 # is one parent an ancestor of the other?
1030 # is one parent an ancestor of the other?
1030 fparentancestor = flog.ancestor(fparent1, fparent2)
1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1031 if fparentancestor == fparent1:
1032 if fparentancestor == fparent1:
1032 fparent1, fparent2 = fparent2, nullid
1033 fparent1, fparent2 = fparent2, nullid
1033 elif fparentancestor == fparent2:
1034 elif fparentancestor == fparent2:
1034 fparent2 = nullid
1035 fparent2 = nullid
1035
1036
1036 # is the file changed?
1037 # is the file changed?
1037 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1038 changelist.append(fname)
1039 changelist.append(fname)
1039 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1040
1041
1041 # are just the flags changed during merge?
1042 # are just the flags changed during merge?
1042 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1043 changelist.append(fname)
1044 changelist.append(fname)
1044
1045
1045 return fparent1
1046 return fparent1
1046
1047
1047 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 editor=False, extra={}):
1049 editor=False, extra={}):
1049 """Add a new revision to current repository.
1050 """Add a new revision to current repository.
1050
1051
1051 Revision information is gathered from the working directory,
1052 Revision information is gathered from the working directory,
1052 match can be used to filter the committed files. If editor is
1053 match can be used to filter the committed files. If editor is
1053 supplied, it is called to get a commit message.
1054 supplied, it is called to get a commit message.
1054 """
1055 """
1055
1056
1056 def fail(f, msg):
1057 def fail(f, msg):
1057 raise util.Abort('%s: %s' % (f, msg))
1058 raise util.Abort('%s: %s' % (f, msg))
1058
1059
1059 if not match:
1060 if not match:
1060 match = matchmod.always(self.root, '')
1061 match = matchmod.always(self.root, '')
1061
1062
1062 if not force:
1063 if not force:
1063 vdirs = []
1064 vdirs = []
1064 match.dir = vdirs.append
1065 match.dir = vdirs.append
1065 match.bad = fail
1066 match.bad = fail
1066
1067
1067 wlock = self.wlock()
1068 wlock = self.wlock()
1068 try:
1069 try:
1069 wctx = self[None]
1070 wctx = self[None]
1070 merge = len(wctx.parents()) > 1
1071 merge = len(wctx.parents()) > 1
1071
1072
1072 if (not force and merge and match and
1073 if (not force and merge and match and
1073 (match.files() or match.anypats())):
1074 (match.files() or match.anypats())):
1074 raise util.Abort(_('cannot partially commit a merge '
1075 raise util.Abort(_('cannot partially commit a merge '
1075 '(do not specify files or patterns)'))
1076 '(do not specify files or patterns)'))
1076
1077
1077 changes = self.status(match=match, clean=force)
1078 changes = self.status(match=match, clean=force)
1078 if force:
1079 if force:
1079 changes[0].extend(changes[6]) # mq may commit unchanged files
1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1080
1081
1081 # check subrepos
1082 # check subrepos
1082 subs = []
1083 subs = []
1083 removedsubs = set()
1084 removedsubs = set()
1084 if '.hgsub' in wctx:
1085 if '.hgsub' in wctx:
1085 # only manage subrepos and .hgsubstate if .hgsub is present
1086 # only manage subrepos and .hgsubstate if .hgsub is present
1086 for p in wctx.parents():
1087 for p in wctx.parents():
1087 removedsubs.update(s for s in p.substate if match(s))
1088 removedsubs.update(s for s in p.substate if match(s))
1088 for s in wctx.substate:
1089 for s in wctx.substate:
1089 removedsubs.discard(s)
1090 removedsubs.discard(s)
1090 if match(s) and wctx.sub(s).dirty():
1091 if match(s) and wctx.sub(s).dirty():
1091 subs.append(s)
1092 subs.append(s)
1092 if (subs or removedsubs):
1093 if (subs or removedsubs):
1093 if (not match('.hgsub') and
1094 if (not match('.hgsub') and
1094 '.hgsub' in (wctx.modified() + wctx.added())):
1095 '.hgsub' in (wctx.modified() + wctx.added())):
1095 raise util.Abort(
1096 raise util.Abort(
1096 _("can't commit subrepos without .hgsub"))
1097 _("can't commit subrepos without .hgsub"))
1097 if '.hgsubstate' not in changes[0]:
1098 if '.hgsubstate' not in changes[0]:
1098 changes[0].insert(0, '.hgsubstate')
1099 changes[0].insert(0, '.hgsubstate')
1099 if '.hgsubstate' in changes[2]:
1100 if '.hgsubstate' in changes[2]:
1100 changes[2].remove('.hgsubstate')
1101 changes[2].remove('.hgsubstate')
1101 elif '.hgsub' in changes[2]:
1102 elif '.hgsub' in changes[2]:
1102 # clean up .hgsubstate when .hgsub is removed
1103 # clean up .hgsubstate when .hgsub is removed
1103 if ('.hgsubstate' in wctx and
1104 if ('.hgsubstate' in wctx and
1104 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1105 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1105 changes[2].insert(0, '.hgsubstate')
1106 changes[2].insert(0, '.hgsubstate')
1106
1107
1107 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1108 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1108 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1109 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1109 if changedsubs:
1110 if changedsubs:
1110 raise util.Abort(_("uncommitted changes in subrepo %s")
1111 raise util.Abort(_("uncommitted changes in subrepo %s")
1111 % changedsubs[0],
1112 % changedsubs[0],
1112 hint=_("use --subrepos for recursive commit"))
1113 hint=_("use --subrepos for recursive commit"))
1113
1114
1114 # make sure all explicit patterns are matched
1115 # make sure all explicit patterns are matched
1115 if not force and match.files():
1116 if not force and match.files():
1116 matched = set(changes[0] + changes[1] + changes[2])
1117 matched = set(changes[0] + changes[1] + changes[2])
1117
1118
1118 for f in match.files():
1119 for f in match.files():
1119 if f == '.' or f in matched or f in wctx.substate:
1120 if f == '.' or f in matched or f in wctx.substate:
1120 continue
1121 continue
1121 if f in changes[3]: # missing
1122 if f in changes[3]: # missing
1122 fail(f, _('file not found!'))
1123 fail(f, _('file not found!'))
1123 if f in vdirs: # visited directory
1124 if f in vdirs: # visited directory
1124 d = f + '/'
1125 d = f + '/'
1125 for mf in matched:
1126 for mf in matched:
1126 if mf.startswith(d):
1127 if mf.startswith(d):
1127 break
1128 break
1128 else:
1129 else:
1129 fail(f, _("no match under directory!"))
1130 fail(f, _("no match under directory!"))
1130 elif f not in self.dirstate:
1131 elif f not in self.dirstate:
1131 fail(f, _("file not tracked!"))
1132 fail(f, _("file not tracked!"))
1132
1133
1133 if (not force and not extra.get("close") and not merge
1134 if (not force and not extra.get("close") and not merge
1134 and not (changes[0] or changes[1] or changes[2])
1135 and not (changes[0] or changes[1] or changes[2])
1135 and wctx.branch() == wctx.p1().branch()):
1136 and wctx.branch() == wctx.p1().branch()):
1136 return None
1137 return None
1137
1138
1138 ms = mergemod.mergestate(self)
1139 ms = mergemod.mergestate(self)
1139 for f in changes[0]:
1140 for f in changes[0]:
1140 if f in ms and ms[f] == 'u':
1141 if f in ms and ms[f] == 'u':
1141 raise util.Abort(_("unresolved merge conflicts "
1142 raise util.Abort(_("unresolved merge conflicts "
1142 "(see hg help resolve)"))
1143 "(see hg help resolve)"))
1143
1144
1144 cctx = context.workingctx(self, text, user, date, extra, changes)
1145 cctx = context.workingctx(self, text, user, date, extra, changes)
1145 if editor:
1146 if editor:
1146 cctx._text = editor(self, cctx, subs)
1147 cctx._text = editor(self, cctx, subs)
1147 edited = (text != cctx._text)
1148 edited = (text != cctx._text)
1148
1149
1149 # commit subs
1150 # commit subs
1150 if subs or removedsubs:
1151 if subs or removedsubs:
1151 state = wctx.substate.copy()
1152 state = wctx.substate.copy()
1152 for s in sorted(subs):
1153 for s in sorted(subs):
1153 sub = wctx.sub(s)
1154 sub = wctx.sub(s)
1154 self.ui.status(_('committing subrepository %s\n') %
1155 self.ui.status(_('committing subrepository %s\n') %
1155 subrepo.subrelpath(sub))
1156 subrepo.subrelpath(sub))
1156 sr = sub.commit(cctx._text, user, date)
1157 sr = sub.commit(cctx._text, user, date)
1157 state[s] = (state[s][0], sr)
1158 state[s] = (state[s][0], sr)
1158 subrepo.writestate(self, state)
1159 subrepo.writestate(self, state)
1159
1160
1160 # Save commit message in case this transaction gets rolled back
1161 # Save commit message in case this transaction gets rolled back
1161 # (e.g. by a pretxncommit hook). Leave the content alone on
1162 # (e.g. by a pretxncommit hook). Leave the content alone on
1162 # the assumption that the user will use the same editor again.
1163 # the assumption that the user will use the same editor again.
1163 msgfn = self.savecommitmessage(cctx._text)
1164 msgfn = self.savecommitmessage(cctx._text)
1164
1165
1165 p1, p2 = self.dirstate.parents()
1166 p1, p2 = self.dirstate.parents()
1166 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1167 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1167 try:
1168 try:
1168 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1169 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1169 ret = self.commitctx(cctx, True)
1170 ret = self.commitctx(cctx, True)
1170 except:
1171 except:
1171 if edited:
1172 if edited:
1172 self.ui.write(
1173 self.ui.write(
1173 _('note: commit message saved in %s\n') % msgfn)
1174 _('note: commit message saved in %s\n') % msgfn)
1174 raise
1175 raise
1175
1176
1176 # update bookmarks, dirstate and mergestate
1177 # update bookmarks, dirstate and mergestate
1177 bookmarks.update(self, p1, ret)
1178 bookmarks.update(self, p1, ret)
1178 for f in changes[0] + changes[1]:
1179 for f in changes[0] + changes[1]:
1179 self.dirstate.normal(f)
1180 self.dirstate.normal(f)
1180 for f in changes[2]:
1181 for f in changes[2]:
1181 self.dirstate.drop(f)
1182 self.dirstate.drop(f)
1182 self.dirstate.setparents(ret)
1183 self.dirstate.setparents(ret)
1183 ms.reset()
1184 ms.reset()
1184 finally:
1185 finally:
1185 wlock.release()
1186 wlock.release()
1186
1187
1187 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1188 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1188 return ret
1189 return ret
1189
1190
1190 def commitctx(self, ctx, error=False):
1191 def commitctx(self, ctx, error=False):
1191 """Add a new revision to current repository.
1192 """Add a new revision to current repository.
1192 Revision information is passed via the context argument.
1193 Revision information is passed via the context argument.
1193 """
1194 """
1194
1195
1195 tr = lock = None
1196 tr = lock = None
1196 removed = list(ctx.removed())
1197 removed = list(ctx.removed())
1197 p1, p2 = ctx.p1(), ctx.p2()
1198 p1, p2 = ctx.p1(), ctx.p2()
1198 user = ctx.user()
1199 user = ctx.user()
1199
1200
1200 lock = self.lock()
1201 lock = self.lock()
1201 try:
1202 try:
1202 tr = self.transaction("commit")
1203 tr = self.transaction("commit")
1203 trp = weakref.proxy(tr)
1204 trp = weakref.proxy(tr)
1204
1205
1205 if ctx.files():
1206 if ctx.files():
1206 m1 = p1.manifest().copy()
1207 m1 = p1.manifest().copy()
1207 m2 = p2.manifest()
1208 m2 = p2.manifest()
1208
1209
1209 # check in files
1210 # check in files
1210 new = {}
1211 new = {}
1211 changed = []
1212 changed = []
1212 linkrev = len(self)
1213 linkrev = len(self)
1213 for f in sorted(ctx.modified() + ctx.added()):
1214 for f in sorted(ctx.modified() + ctx.added()):
1214 self.ui.note(f + "\n")
1215 self.ui.note(f + "\n")
1215 try:
1216 try:
1216 fctx = ctx[f]
1217 fctx = ctx[f]
1217 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1218 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1218 changed)
1219 changed)
1219 m1.set(f, fctx.flags())
1220 m1.set(f, fctx.flags())
1220 except OSError, inst:
1221 except OSError, inst:
1221 self.ui.warn(_("trouble committing %s!\n") % f)
1222 self.ui.warn(_("trouble committing %s!\n") % f)
1222 raise
1223 raise
1223 except IOError, inst:
1224 except IOError, inst:
1224 errcode = getattr(inst, 'errno', errno.ENOENT)
1225 errcode = getattr(inst, 'errno', errno.ENOENT)
1225 if error or errcode and errcode != errno.ENOENT:
1226 if error or errcode and errcode != errno.ENOENT:
1226 self.ui.warn(_("trouble committing %s!\n") % f)
1227 self.ui.warn(_("trouble committing %s!\n") % f)
1227 raise
1228 raise
1228 else:
1229 else:
1229 removed.append(f)
1230 removed.append(f)
1230
1231
1231 # update manifest
1232 # update manifest
1232 m1.update(new)
1233 m1.update(new)
1233 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1234 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1234 drop = [f for f in removed if f in m1]
1235 drop = [f for f in removed if f in m1]
1235 for f in drop:
1236 for f in drop:
1236 del m1[f]
1237 del m1[f]
1237 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1238 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1238 p2.manifestnode(), (new, drop))
1239 p2.manifestnode(), (new, drop))
1239 files = changed + removed
1240 files = changed + removed
1240 else:
1241 else:
1241 mn = p1.manifestnode()
1242 mn = p1.manifestnode()
1242 files = []
1243 files = []
1243
1244
1244 # update changelog
1245 # update changelog
1245 self.changelog.delayupdate()
1246 self.changelog.delayupdate()
1246 n = self.changelog.add(mn, files, ctx.description(),
1247 n = self.changelog.add(mn, files, ctx.description(),
1247 trp, p1.node(), p2.node(),
1248 trp, p1.node(), p2.node(),
1248 user, ctx.date(), ctx.extra().copy())
1249 user, ctx.date(), ctx.extra().copy())
1249 p = lambda: self.changelog.writepending() and self.root or ""
1250 p = lambda: self.changelog.writepending() and self.root or ""
1250 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1251 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1251 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1252 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1252 parent2=xp2, pending=p)
1253 parent2=xp2, pending=p)
1253 self.changelog.finalize(trp)
1254 self.changelog.finalize(trp)
1254 # set the new commit is proper phase
1255 # set the new commit is proper phase
1255 targetphase = self.ui.configint('phases', 'new-commit', 1)
1256 targetphase = self.ui.configint('phases', 'new-commit', 1)
1256 if targetphase:
1257 if targetphase:
1257 # retract boundary do not alter parent changeset.
1258 # retract boundary do not alter parent changeset.
1258 # if a parent have higher the resulting phase will
1259 # if a parent have higher the resulting phase will
1259 # be compliant anyway
1260 # be compliant anyway
1260 #
1261 #
1261 # if minimal phase was 0 we don't need to retract anything
1262 # if minimal phase was 0 we don't need to retract anything
1262 phases.retractboundary(self, targetphase, [n])
1263 phases.retractboundary(self, targetphase, [n])
1263 tr.close()
1264 tr.close()
1264
1265
1265 if self._branchcache:
1266 if self._branchcache:
1266 self.updatebranchcache()
1267 self.updatebranchcache()
1267 return n
1268 return n
1268 finally:
1269 finally:
1269 if tr:
1270 if tr:
1270 tr.release()
1271 tr.release()
1271 lock.release()
1272 lock.release()
1272
1273
1273 def destroyed(self):
1274 def destroyed(self):
1274 '''Inform the repository that nodes have been destroyed.
1275 '''Inform the repository that nodes have been destroyed.
1275 Intended for use by strip and rollback, so there's a common
1276 Intended for use by strip and rollback, so there's a common
1276 place for anything that has to be done after destroying history.'''
1277 place for anything that has to be done after destroying history.'''
1277 # XXX it might be nice if we could take the list of destroyed
1278 # XXX it might be nice if we could take the list of destroyed
1278 # nodes, but I don't see an easy way for rollback() to do that
1279 # nodes, but I don't see an easy way for rollback() to do that
1279
1280
1280 # Ensure the persistent tag cache is updated. Doing it now
1281 # Ensure the persistent tag cache is updated. Doing it now
1281 # means that the tag cache only has to worry about destroyed
1282 # means that the tag cache only has to worry about destroyed
1282 # heads immediately after a strip/rollback. That in turn
1283 # heads immediately after a strip/rollback. That in turn
1283 # guarantees that "cachetip == currenttip" (comparing both rev
1284 # guarantees that "cachetip == currenttip" (comparing both rev
1284 # and node) always means no nodes have been added or destroyed.
1285 # and node) always means no nodes have been added or destroyed.
1285
1286
1286 # XXX this is suboptimal when qrefresh'ing: we strip the current
1287 # XXX this is suboptimal when qrefresh'ing: we strip the current
1287 # head, refresh the tag cache, then immediately add a new head.
1288 # head, refresh the tag cache, then immediately add a new head.
1288 # But I think doing it this way is necessary for the "instant
1289 # But I think doing it this way is necessary for the "instant
1289 # tag cache retrieval" case to work.
1290 # tag cache retrieval" case to work.
1290 self.invalidatecaches()
1291 self.invalidatecaches()
1291
1292
1292 def walk(self, match, node=None):
1293 def walk(self, match, node=None):
1293 '''
1294 '''
1294 walk recursively through the directory tree or a given
1295 walk recursively through the directory tree or a given
1295 changeset, finding all files matched by the match
1296 changeset, finding all files matched by the match
1296 function
1297 function
1297 '''
1298 '''
1298 return self[node].walk(match)
1299 return self[node].walk(match)
1299
1300
1300 def status(self, node1='.', node2=None, match=None,
1301 def status(self, node1='.', node2=None, match=None,
1301 ignored=False, clean=False, unknown=False,
1302 ignored=False, clean=False, unknown=False,
1302 listsubrepos=False):
1303 listsubrepos=False):
1303 """return status of files between two nodes or node and working directory
1304 """return status of files between two nodes or node and working directory
1304
1305
1305 If node1 is None, use the first dirstate parent instead.
1306 If node1 is None, use the first dirstate parent instead.
1306 If node2 is None, compare node1 with working directory.
1307 If node2 is None, compare node1 with working directory.
1307 """
1308 """
1308
1309
1309 def mfmatches(ctx):
1310 def mfmatches(ctx):
1310 mf = ctx.manifest().copy()
1311 mf = ctx.manifest().copy()
1311 for fn in mf.keys():
1312 for fn in mf.keys():
1312 if not match(fn):
1313 if not match(fn):
1313 del mf[fn]
1314 del mf[fn]
1314 return mf
1315 return mf
1315
1316
1316 if isinstance(node1, context.changectx):
1317 if isinstance(node1, context.changectx):
1317 ctx1 = node1
1318 ctx1 = node1
1318 else:
1319 else:
1319 ctx1 = self[node1]
1320 ctx1 = self[node1]
1320 if isinstance(node2, context.changectx):
1321 if isinstance(node2, context.changectx):
1321 ctx2 = node2
1322 ctx2 = node2
1322 else:
1323 else:
1323 ctx2 = self[node2]
1324 ctx2 = self[node2]
1324
1325
1325 working = ctx2.rev() is None
1326 working = ctx2.rev() is None
1326 parentworking = working and ctx1 == self['.']
1327 parentworking = working and ctx1 == self['.']
1327 match = match or matchmod.always(self.root, self.getcwd())
1328 match = match or matchmod.always(self.root, self.getcwd())
1328 listignored, listclean, listunknown = ignored, clean, unknown
1329 listignored, listclean, listunknown = ignored, clean, unknown
1329
1330
1330 # load earliest manifest first for caching reasons
1331 # load earliest manifest first for caching reasons
1331 if not working and ctx2.rev() < ctx1.rev():
1332 if not working and ctx2.rev() < ctx1.rev():
1332 ctx2.manifest()
1333 ctx2.manifest()
1333
1334
1334 if not parentworking:
1335 if not parentworking:
1335 def bad(f, msg):
1336 def bad(f, msg):
1336 if f not in ctx1:
1337 if f not in ctx1:
1337 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1338 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1338 match.bad = bad
1339 match.bad = bad
1339
1340
1340 if working: # we need to scan the working dir
1341 if working: # we need to scan the working dir
1341 subrepos = []
1342 subrepos = []
1342 if '.hgsub' in self.dirstate:
1343 if '.hgsub' in self.dirstate:
1343 subrepos = ctx2.substate.keys()
1344 subrepos = ctx2.substate.keys()
1344 s = self.dirstate.status(match, subrepos, listignored,
1345 s = self.dirstate.status(match, subrepos, listignored,
1345 listclean, listunknown)
1346 listclean, listunknown)
1346 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1347 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1347
1348
1348 # check for any possibly clean files
1349 # check for any possibly clean files
1349 if parentworking and cmp:
1350 if parentworking and cmp:
1350 fixup = []
1351 fixup = []
1351 # do a full compare of any files that might have changed
1352 # do a full compare of any files that might have changed
1352 for f in sorted(cmp):
1353 for f in sorted(cmp):
1353 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1354 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1354 or ctx1[f].cmp(ctx2[f])):
1355 or ctx1[f].cmp(ctx2[f])):
1355 modified.append(f)
1356 modified.append(f)
1356 else:
1357 else:
1357 fixup.append(f)
1358 fixup.append(f)
1358
1359
1359 # update dirstate for files that are actually clean
1360 # update dirstate for files that are actually clean
1360 if fixup:
1361 if fixup:
1361 if listclean:
1362 if listclean:
1362 clean += fixup
1363 clean += fixup
1363
1364
1364 try:
1365 try:
1365 # updating the dirstate is optional
1366 # updating the dirstate is optional
1366 # so we don't wait on the lock
1367 # so we don't wait on the lock
1367 wlock = self.wlock(False)
1368 wlock = self.wlock(False)
1368 try:
1369 try:
1369 for f in fixup:
1370 for f in fixup:
1370 self.dirstate.normal(f)
1371 self.dirstate.normal(f)
1371 finally:
1372 finally:
1372 wlock.release()
1373 wlock.release()
1373 except error.LockError:
1374 except error.LockError:
1374 pass
1375 pass
1375
1376
1376 if not parentworking:
1377 if not parentworking:
1377 mf1 = mfmatches(ctx1)
1378 mf1 = mfmatches(ctx1)
1378 if working:
1379 if working:
1379 # we are comparing working dir against non-parent
1380 # we are comparing working dir against non-parent
1380 # generate a pseudo-manifest for the working dir
1381 # generate a pseudo-manifest for the working dir
1381 mf2 = mfmatches(self['.'])
1382 mf2 = mfmatches(self['.'])
1382 for f in cmp + modified + added:
1383 for f in cmp + modified + added:
1383 mf2[f] = None
1384 mf2[f] = None
1384 mf2.set(f, ctx2.flags(f))
1385 mf2.set(f, ctx2.flags(f))
1385 for f in removed:
1386 for f in removed:
1386 if f in mf2:
1387 if f in mf2:
1387 del mf2[f]
1388 del mf2[f]
1388 else:
1389 else:
1389 # we are comparing two revisions
1390 # we are comparing two revisions
1390 deleted, unknown, ignored = [], [], []
1391 deleted, unknown, ignored = [], [], []
1391 mf2 = mfmatches(ctx2)
1392 mf2 = mfmatches(ctx2)
1392
1393
1393 modified, added, clean = [], [], []
1394 modified, added, clean = [], [], []
1394 for fn in mf2:
1395 for fn in mf2:
1395 if fn in mf1:
1396 if fn in mf1:
1396 if (fn not in deleted and
1397 if (fn not in deleted and
1397 (mf1.flags(fn) != mf2.flags(fn) or
1398 (mf1.flags(fn) != mf2.flags(fn) or
1398 (mf1[fn] != mf2[fn] and
1399 (mf1[fn] != mf2[fn] and
1399 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1400 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1400 modified.append(fn)
1401 modified.append(fn)
1401 elif listclean:
1402 elif listclean:
1402 clean.append(fn)
1403 clean.append(fn)
1403 del mf1[fn]
1404 del mf1[fn]
1404 elif fn not in deleted:
1405 elif fn not in deleted:
1405 added.append(fn)
1406 added.append(fn)
1406 removed = mf1.keys()
1407 removed = mf1.keys()
1407
1408
1408 if working and modified and not self.dirstate._checklink:
1409 if working and modified and not self.dirstate._checklink:
1409 # Symlink placeholders may get non-symlink-like contents
1410 # Symlink placeholders may get non-symlink-like contents
1410 # via user error or dereferencing by NFS or Samba servers,
1411 # via user error or dereferencing by NFS or Samba servers,
1411 # so we filter out any placeholders that don't look like a
1412 # so we filter out any placeholders that don't look like a
1412 # symlink
1413 # symlink
1413 sane = []
1414 sane = []
1414 for f in modified:
1415 for f in modified:
1415 if ctx2.flags(f) == 'l':
1416 if ctx2.flags(f) == 'l':
1416 d = ctx2[f].data()
1417 d = ctx2[f].data()
1417 if len(d) >= 1024 or '\n' in d or util.binary(d):
1418 if len(d) >= 1024 or '\n' in d or util.binary(d):
1418 self.ui.debug('ignoring suspect symlink placeholder'
1419 self.ui.debug('ignoring suspect symlink placeholder'
1419 ' "%s"\n' % f)
1420 ' "%s"\n' % f)
1420 continue
1421 continue
1421 sane.append(f)
1422 sane.append(f)
1422 modified = sane
1423 modified = sane
1423
1424
1424 r = modified, added, removed, deleted, unknown, ignored, clean
1425 r = modified, added, removed, deleted, unknown, ignored, clean
1425
1426
1426 if listsubrepos:
1427 if listsubrepos:
1427 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1428 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1428 if working:
1429 if working:
1429 rev2 = None
1430 rev2 = None
1430 else:
1431 else:
1431 rev2 = ctx2.substate[subpath][1]
1432 rev2 = ctx2.substate[subpath][1]
1432 try:
1433 try:
1433 submatch = matchmod.narrowmatcher(subpath, match)
1434 submatch = matchmod.narrowmatcher(subpath, match)
1434 s = sub.status(rev2, match=submatch, ignored=listignored,
1435 s = sub.status(rev2, match=submatch, ignored=listignored,
1435 clean=listclean, unknown=listunknown,
1436 clean=listclean, unknown=listunknown,
1436 listsubrepos=True)
1437 listsubrepos=True)
1437 for rfiles, sfiles in zip(r, s):
1438 for rfiles, sfiles in zip(r, s):
1438 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1439 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1439 except error.LookupError:
1440 except error.LookupError:
1440 self.ui.status(_("skipping missing subrepository: %s\n")
1441 self.ui.status(_("skipping missing subrepository: %s\n")
1441 % subpath)
1442 % subpath)
1442
1443
1443 for l in r:
1444 for l in r:
1444 l.sort()
1445 l.sort()
1445 return r
1446 return r
1446
1447
1447 def heads(self, start=None):
1448 def heads(self, start=None):
1448 heads = self.changelog.heads(start)
1449 heads = self.changelog.heads(start)
1449 # sort the output in rev descending order
1450 # sort the output in rev descending order
1450 return sorted(heads, key=self.changelog.rev, reverse=True)
1451 return sorted(heads, key=self.changelog.rev, reverse=True)
1451
1452
1452 def branchheads(self, branch=None, start=None, closed=False):
1453 def branchheads(self, branch=None, start=None, closed=False):
1453 '''return a (possibly filtered) list of heads for the given branch
1454 '''return a (possibly filtered) list of heads for the given branch
1454
1455
1455 Heads are returned in topological order, from newest to oldest.
1456 Heads are returned in topological order, from newest to oldest.
1456 If branch is None, use the dirstate branch.
1457 If branch is None, use the dirstate branch.
1457 If start is not None, return only heads reachable from start.
1458 If start is not None, return only heads reachable from start.
1458 If closed is True, return heads that are marked as closed as well.
1459 If closed is True, return heads that are marked as closed as well.
1459 '''
1460 '''
1460 if branch is None:
1461 if branch is None:
1461 branch = self[None].branch()
1462 branch = self[None].branch()
1462 branches = self.branchmap()
1463 branches = self.branchmap()
1463 if branch not in branches:
1464 if branch not in branches:
1464 return []
1465 return []
1465 # the cache returns heads ordered lowest to highest
1466 # the cache returns heads ordered lowest to highest
1466 bheads = list(reversed(branches[branch]))
1467 bheads = list(reversed(branches[branch]))
1467 if start is not None:
1468 if start is not None:
1468 # filter out the heads that cannot be reached from startrev
1469 # filter out the heads that cannot be reached from startrev
1469 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1470 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1470 bheads = [h for h in bheads if h in fbheads]
1471 bheads = [h for h in bheads if h in fbheads]
1471 if not closed:
1472 if not closed:
1472 bheads = [h for h in bheads if
1473 bheads = [h for h in bheads if
1473 ('close' not in self.changelog.read(h)[5])]
1474 ('close' not in self.changelog.read(h)[5])]
1474 return bheads
1475 return bheads
1475
1476
1476 def branches(self, nodes):
1477 def branches(self, nodes):
1477 if not nodes:
1478 if not nodes:
1478 nodes = [self.changelog.tip()]
1479 nodes = [self.changelog.tip()]
1479 b = []
1480 b = []
1480 for n in nodes:
1481 for n in nodes:
1481 t = n
1482 t = n
1482 while True:
1483 while True:
1483 p = self.changelog.parents(n)
1484 p = self.changelog.parents(n)
1484 if p[1] != nullid or p[0] == nullid:
1485 if p[1] != nullid or p[0] == nullid:
1485 b.append((t, n, p[0], p[1]))
1486 b.append((t, n, p[0], p[1]))
1486 break
1487 break
1487 n = p[0]
1488 n = p[0]
1488 return b
1489 return b
1489
1490
1490 def between(self, pairs):
1491 def between(self, pairs):
1491 r = []
1492 r = []
1492
1493
1493 for top, bottom in pairs:
1494 for top, bottom in pairs:
1494 n, l, i = top, [], 0
1495 n, l, i = top, [], 0
1495 f = 1
1496 f = 1
1496
1497
1497 while n != bottom and n != nullid:
1498 while n != bottom and n != nullid:
1498 p = self.changelog.parents(n)[0]
1499 p = self.changelog.parents(n)[0]
1499 if i == f:
1500 if i == f:
1500 l.append(n)
1501 l.append(n)
1501 f = f * 2
1502 f = f * 2
1502 n = p
1503 n = p
1503 i += 1
1504 i += 1
1504
1505
1505 r.append(l)
1506 r.append(l)
1506
1507
1507 return r
1508 return r
1508
1509
1509 def pull(self, remote, heads=None, force=False):
1510 def pull(self, remote, heads=None, force=False):
1510 lock = self.lock()
1511 lock = self.lock()
1511 try:
1512 try:
1512 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1513 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1513 force=force)
1514 force=force)
1514 common, fetch, rheads = tmp
1515 common, fetch, rheads = tmp
1515 if not fetch:
1516 if not fetch:
1516 self.ui.status(_("no changes found\n"))
1517 self.ui.status(_("no changes found\n"))
1517 added = []
1518 added = []
1518 result = 0
1519 result = 0
1519 else:
1520 else:
1520 if heads is None and list(common) == [nullid]:
1521 if heads is None and list(common) == [nullid]:
1521 self.ui.status(_("requesting all changes\n"))
1522 self.ui.status(_("requesting all changes\n"))
1522 elif heads is None and remote.capable('changegroupsubset'):
1523 elif heads is None and remote.capable('changegroupsubset'):
1523 # issue1320, avoid a race if remote changed after discovery
1524 # issue1320, avoid a race if remote changed after discovery
1524 heads = rheads
1525 heads = rheads
1525
1526
1526 if remote.capable('getbundle'):
1527 if remote.capable('getbundle'):
1527 cg = remote.getbundle('pull', common=common,
1528 cg = remote.getbundle('pull', common=common,
1528 heads=heads or rheads)
1529 heads=heads or rheads)
1529 elif heads is None:
1530 elif heads is None:
1530 cg = remote.changegroup(fetch, 'pull')
1531 cg = remote.changegroup(fetch, 'pull')
1531 elif not remote.capable('changegroupsubset'):
1532 elif not remote.capable('changegroupsubset'):
1532 raise util.Abort(_("partial pull cannot be done because "
1533 raise util.Abort(_("partial pull cannot be done because "
1533 "other repository doesn't support "
1534 "other repository doesn't support "
1534 "changegroupsubset."))
1535 "changegroupsubset."))
1535 else:
1536 else:
1536 cg = remote.changegroupsubset(fetch, heads, 'pull')
1537 cg = remote.changegroupsubset(fetch, heads, 'pull')
1537 clstart = len(self.changelog)
1538 clstart = len(self.changelog)
1538 result = self.addchangegroup(cg, 'pull', remote.url())
1539 result = self.addchangegroup(cg, 'pull', remote.url())
1539 clend = len(self.changelog)
1540 clend = len(self.changelog)
1540 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1541 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1541
1542
1542
1543
1543 # Get remote phases data from remote
1544 # Get remote phases data from remote
1544 remotephases = remote.listkeys('phases')
1545 remotephases = remote.listkeys('phases')
1545 publishing = bool(remotephases.get('publishing', False))
1546 publishing = bool(remotephases.get('publishing', False))
1546 if remotephases and not publishing:
1547 if remotephases and not publishing:
1547 # remote is new and unpublishing
1548 # remote is new and unpublishing
1548 subset = common + added
1549 subset = common + added
1549 rheads, rroots = phases.analyzeremotephases(self, subset,
1550 rheads, rroots = phases.analyzeremotephases(self, subset,
1550 remotephases)
1551 remotephases)
1551 for phase, boundary in enumerate(rheads):
1552 for phase, boundary in enumerate(rheads):
1552 phases.advanceboundary(self, phase, boundary)
1553 phases.advanceboundary(self, phase, boundary)
1553 else:
1554 else:
1554 # Remote is old or publishing all common changesets
1555 # Remote is old or publishing all common changesets
1555 # should be seen as public
1556 # should be seen as public
1556 phases.advanceboundary(self, 0, common + added)
1557 phases.advanceboundary(self, 0, common + added)
1557 finally:
1558 finally:
1558 lock.release()
1559 lock.release()
1559
1560
1560 return result
1561 return result
1561
1562
def checkpush(self, force, revs):
    """Hook point run just before changesets are pushed.

    The default implementation performs no checks and returns nothing.
    Extensions may override this to veto a push (by raising), or call
    it explicitly when they replace the push command.
    """
1568
1569
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                      revs, newbranch)
            # 'ret' is the value returned when nothing is pushed; it is
            # replaced below by the remote's addchangegroup()/unbundle()
            # result when a changegroup is actually sent.
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url())

            # even when we don't push, exchanging phase data is useful
            remotephases = remote.listkeys('phases')
            if not remotephases: # old server or public only repo
                phases.advanceboundary(self, 0, fut)
                # don't push any phase data as there is nothing to push
            else:
                ana = phases.analyzeremotephases(self, fut, remotephases)
                rheads, rroots = ana
                ### Apply remote phase on local
                if remotephases.get('publishing', False):
                    phases.advanceboundary(self, 0, fut)
                else: # publish = False
                    for phase, rpheads in enumerate(rheads):
                        phases.advanceboundary(self, phase, rpheads)
                ### Apply local phase on remote
                #
                # XXX If the push failed we should use the strict common
                # set, not the future set, to avoid pushing phase data on
                # unknown changesets. This is to be done later.
                futctx = [self[n] for n in fut if n != nullid]
                for phase in phases.trackedphases[::-1]:
                    prevphase = phase - 1
                    # get all candidates for head in the previous phase
                    inprev = [ctx for ctx in futctx
                              if ctx.phase() == prevphase]
                    for newremotehead in self.set('heads(%ld & (%ln::))',
                                                  inprev, rroots[phase]):
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phase), str(prevphase))
                        if not r:
                            # note the trailing space on the first literal:
                            # the two pieces concatenate into one message
                            self.ui.warn(_('updating phase of %s '
                                           'to %s failed!\n')
                                         % (newremotehead, prevphase))
        finally:
            locallock.release()
    finally:
        if lock is not None:
            lock.release()

    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                # only push the bookmark forward along descendants; a
                # non-fast-forward move is rejected with a warning
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1671
1672
def changegroupinfo(self, nodes, source):
    """Report an outgoing changegroup on the ui.

    Prints the changeset count when verbose (or always when bundling),
    and the full list of node hashes at debug level.
    """
    ui = self.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
1679
1680
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = self.changelog
    # an empty/None base set means "everything since the null revision"
    csets, bases, heads = cl.nodesbetween(bases or [nullid], heads)
    # every ancestor of the (recomputed) bases is assumed to be known
    # by the recipient
    baserevs = [cl.rev(n) for n in bases]
    common = set(cl.ancestors(*baserevs))
    return self._changegroupsubset(common, csets, heads, source)
1700
1701
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works, so unknown ones are dropped first.
    """
    cl = self.changelog
    if not common:
        known = [nullid]
    else:
        # discard nodes the local repo has never heard of
        nodemap = cl.nodemap
        known = [n for n in common if n in nodemap]
    if not heads:
        heads = cl.heads()
    common, missing = cl.findcommonmissing(known, heads)
    if missing:
        return self._changegroupsubset(common, missing, heads, source)
    # nothing to bundle
    return None
1722
1723
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup for csets, assuming the recipient already
    knows every changeset whose revision number is in commonrevs.

    Returns an unbundle10 reader over the generated stream. Note that
    heads is sorted in place.
    """
    cl = self.changelog
    mf = self.manifest
    mfs = {}              # manifest node -> linked changelog node
    fnodes = {}           # fname -> {filenode: linked changelog node}
    changedfiles = set()  # all files touched by the outgoing csets
    fstate = ['', {}]     # [current fname, its node map]; shared with lookup()
    count = [0]           # progress counter, boxed so closures can bump it

    # Fast path: if the requested heads are exactly all of our heads,
    # no pruning is necessary and the simpler generator applies.
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    def prune(revlog, candidates):
        # filter any nodes that claim to be part of the known set
        return [n for n in candidates
                if revlog.linkrev(revlog.rev(n)) not in commonrevs]

    def lookup(revlog, node):
        # Map a node in any revlog back to its owning changelog node,
        # collecting needed manifests/filenodes as a side effect.
        if revlog == cl:
            cdata = cl.read(node)
            changedfiles.update(cdata[3])
            mfs.setdefault(cdata[0], node)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(csets))
            return node
        elif revlog == mf:
            clnode = mfs[node]
            mdata = mf.readfast(node)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[node]
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                unit=_('files'), total=len(changedfiles))
            return fstate[1][node]

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        """Yield raw chunks: changesets, then manifests, then files."""
        # The changeset group; lookup() records the manifests and the
        # changed files while each chunk is produced.
        for chunk in cl.group(csets, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # The manifest group; lookup() records the needed file nodes.
        count[0] = 0
        for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        mfs.clear()

        # One group per changed file, sorted by name.
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})

            nodelist = prune(filerevlog, fstate[1])
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk

        # Signal that no more groups are left.
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if csets:
        self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1819
1820
def changegroup(self, basenodes, source):
    """Return a changegroup of everything descending from basenodes."""
    # Delegating to changegroupsubset() with our current heads avoids
    # the race described in issue1320 (heads changing after discovery).
    heads = self.heads()
    return self.changegroupsubset(basenodes, heads, source)
1823
1824
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than _changegroupsubset as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    cl = self.changelog
    mf = self.manifest
    mfs = {}              # manifest node -> linked changelog node
    changedfiles = set()  # all files touched by the outgoing csets
    fstate = ['']         # [current fname]; shared with lookup()
    count = [0]           # progress counter, boxed for the closures

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    # linkrevs of the outgoing changesets ('linkrevs' rather than
    # 'revset', which would shadow the imported revset module)
    linkrevs = set(cl.rev(n) for n in nodes)

    def gennodelst(log):
        # nodes of 'log' introduced by one of the outgoing changesets
        return [log.node(r) for r in log if log.linkrev(r) in linkrevs]

    def lookup(revlog, node):
        # Map a node back to its owning changelog node; for the
        # changelog itself also collect manifests and changed files.
        if revlog == cl:
            cdata = cl.read(node)
            changedfiles.update(cdata[3])
            mfs.setdefault(cdata[0], node)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(nodes))
            return node
        elif revlog == mf:
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return cl.node(revlog.linkrev(revlog.rev(node)))
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                total=len(changedfiles), unit=_('files'))
            return cl.node(revlog.linkrev(revlog.rev(node)))

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # changeset group (also collects the list of changed files)
        for chunk in cl.group(nodes, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # manifest group
        count[0] = 0
        for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # one group per changed file, sorted by name
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            nodelist = gennodelst(filerevlog)
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1908
1909
1909 def addchangegroup(self, source, srctype, url, emptyok=False):
1910 def addchangegroup(self, source, srctype, url, emptyok=False):
1910 """Add the changegroup returned by source.read() to this repo.
1911 """Add the changegroup returned by source.read() to this repo.
1911 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1912 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1912 the URL of the repo where this changegroup is coming from.
1913 the URL of the repo where this changegroup is coming from.
1913
1914
1914 Return an integer summarizing the change to this repo:
1915 Return an integer summarizing the change to this repo:
1915 - nothing changed or no source: 0
1916 - nothing changed or no source: 0
1916 - more heads than before: 1+added heads (2..n)
1917 - more heads than before: 1+added heads (2..n)
1917 - fewer heads than before: -1-removed heads (-2..-n)
1918 - fewer heads than before: -1-removed heads (-2..-n)
1918 - number of heads stays the same: 1
1919 - number of heads stays the same: 1
1919 """
1920 """
1920 def csmap(x):
1921 def csmap(x):
1921 self.ui.debug("add changeset %s\n" % short(x))
1922 self.ui.debug("add changeset %s\n" % short(x))
1922 return len(cl)
1923 return len(cl)
1923
1924
1924 def revmap(x):
1925 def revmap(x):
1925 return cl.rev(x)
1926 return cl.rev(x)
1926
1927
1927 if not source:
1928 if not source:
1928 return 0
1929 return 0
1929
1930
1930 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1931 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1931
1932
1932 changesets = files = revisions = 0
1933 changesets = files = revisions = 0
1933 efiles = set()
1934 efiles = set()
1934
1935
1935 # write changelog data to temp files so concurrent readers will not see
1936 # write changelog data to temp files so concurrent readers will not see
1936 # inconsistent view
1937 # inconsistent view
1937 cl = self.changelog
1938 cl = self.changelog
1938 cl.delayupdate()
1939 cl.delayupdate()
1939 oldheads = cl.heads()
1940 oldheads = cl.heads()
1940
1941
1941 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1942 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1942 try:
1943 try:
1943 trp = weakref.proxy(tr)
1944 trp = weakref.proxy(tr)
1944 # pull off the changeset group
1945 # pull off the changeset group
1945 self.ui.status(_("adding changesets\n"))
1946 self.ui.status(_("adding changesets\n"))
1946 clstart = len(cl)
1947 clstart = len(cl)
1947 class prog(object):
1948 class prog(object):
1948 step = _('changesets')
1949 step = _('changesets')
1949 count = 1
1950 count = 1
1950 ui = self.ui
1951 ui = self.ui
1951 total = None
1952 total = None
1952 def __call__(self):
1953 def __call__(self):
1953 self.ui.progress(self.step, self.count, unit=_('chunks'),
1954 self.ui.progress(self.step, self.count, unit=_('chunks'),
1954 total=self.total)
1955 total=self.total)
1955 self.count += 1
1956 self.count += 1
1956 pr = prog()
1957 pr = prog()
1957 source.callback = pr
1958 source.callback = pr
1958
1959
1959 source.changelogheader()
1960 source.changelogheader()
1960 if (cl.addgroup(source, csmap, trp) is None
1961 if (cl.addgroup(source, csmap, trp) is None
1961 and not emptyok):
1962 and not emptyok):
1962 raise util.Abort(_("received changelog group is empty"))
1963 raise util.Abort(_("received changelog group is empty"))
1963 clend = len(cl)
1964 clend = len(cl)
1964 changesets = clend - clstart
1965 changesets = clend - clstart
1965 for c in xrange(clstart, clend):
1966 for c in xrange(clstart, clend):
1966 efiles.update(self[c].files())
1967 efiles.update(self[c].files())
1967 efiles = len(efiles)
1968 efiles = len(efiles)
1968 self.ui.progress(_('changesets'), None)
1969 self.ui.progress(_('changesets'), None)
1969
1970
1970 # pull off the manifest group
1971 # pull off the manifest group
1971 self.ui.status(_("adding manifests\n"))
1972 self.ui.status(_("adding manifests\n"))
1972 pr.step = _('manifests')
1973 pr.step = _('manifests')
1973 pr.count = 1
1974 pr.count = 1
1974 pr.total = changesets # manifests <= changesets
1975 pr.total = changesets # manifests <= changesets
1975 # no need to check for empty manifest group here:
1976 # no need to check for empty manifest group here:
1976 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1977 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1977 # no new manifest will be created and the manifest group will
1978 # no new manifest will be created and the manifest group will
1978 # be empty during the pull
1979 # be empty during the pull
1979 source.manifestheader()
1980 source.manifestheader()
1980 self.manifest.addgroup(source, revmap, trp)
1981 self.manifest.addgroup(source, revmap, trp)
1981 self.ui.progress(_('manifests'), None)
1982 self.ui.progress(_('manifests'), None)
1982
1983
1983 needfiles = {}
1984 needfiles = {}
1984 if self.ui.configbool('server', 'validate', default=False):
1985 if self.ui.configbool('server', 'validate', default=False):
1985 # validate incoming csets have their manifests
1986 # validate incoming csets have their manifests
1986 for cset in xrange(clstart, clend):
1987 for cset in xrange(clstart, clend):
1987 mfest = self.changelog.read(self.changelog.node(cset))[0]
1988 mfest = self.changelog.read(self.changelog.node(cset))[0]
1988 mfest = self.manifest.readdelta(mfest)
1989 mfest = self.manifest.readdelta(mfest)
1989 # store file nodes we must see
1990 # store file nodes we must see
1990 for f, n in mfest.iteritems():
1991 for f, n in mfest.iteritems():
1991 needfiles.setdefault(f, set()).add(n)
1992 needfiles.setdefault(f, set()).add(n)
1992
1993
1993 # process the files
1994 # process the files
1994 self.ui.status(_("adding file changes\n"))
1995 self.ui.status(_("adding file changes\n"))
1995 pr.step = _('files')
1996 pr.step = _('files')
1996 pr.count = 1
1997 pr.count = 1
1997 pr.total = efiles
1998 pr.total = efiles
1998 source.callback = None
1999 source.callback = None
1999
2000
2000 while True:
2001 while True:
2001 chunkdata = source.filelogheader()
2002 chunkdata = source.filelogheader()
2002 if not chunkdata:
2003 if not chunkdata:
2003 break
2004 break
2004 f = chunkdata["filename"]
2005 f = chunkdata["filename"]
2005 self.ui.debug("adding %s revisions\n" % f)
2006 self.ui.debug("adding %s revisions\n" % f)
2006 pr()
2007 pr()
2007 fl = self.file(f)
2008 fl = self.file(f)
2008 o = len(fl)
2009 o = len(fl)
2009 if fl.addgroup(source, revmap, trp) is None:
2010 if fl.addgroup(source, revmap, trp) is None:
2010 raise util.Abort(_("received file revlog group is empty"))
2011 raise util.Abort(_("received file revlog group is empty"))
2011 revisions += len(fl) - o
2012 revisions += len(fl) - o
2012 files += 1
2013 files += 1
2013 if f in needfiles:
2014 if f in needfiles:
2014 needs = needfiles[f]
2015 needs = needfiles[f]
2015 for new in xrange(o, len(fl)):
2016 for new in xrange(o, len(fl)):
2016 n = fl.node(new)
2017 n = fl.node(new)
2017 if n in needs:
2018 if n in needs:
2018 needs.remove(n)
2019 needs.remove(n)
2019 if not needs:
2020 if not needs:
2020 del needfiles[f]
2021 del needfiles[f]
2021 self.ui.progress(_('files'), None)
2022 self.ui.progress(_('files'), None)
2022
2023
2023 for f, needs in needfiles.iteritems():
2024 for f, needs in needfiles.iteritems():
2024 fl = self.file(f)
2025 fl = self.file(f)
2025 for n in needs:
2026 for n in needs:
2026 try:
2027 try:
2027 fl.rev(n)
2028 fl.rev(n)
2028 except error.LookupError:
2029 except error.LookupError:
2029 raise util.Abort(
2030 raise util.Abort(
2030 _('missing file data for %s:%s - run hg verify') %
2031 _('missing file data for %s:%s - run hg verify') %
2031 (f, hex(n)))
2032 (f, hex(n)))
2032
2033
2033 dh = 0
2034 dh = 0
2034 if oldheads:
2035 if oldheads:
2035 heads = cl.heads()
2036 heads = cl.heads()
2036 dh = len(heads) - len(oldheads)
2037 dh = len(heads) - len(oldheads)
2037 for h in heads:
2038 for h in heads:
2038 if h not in oldheads and 'close' in self[h].extra():
2039 if h not in oldheads and 'close' in self[h].extra():
2039 dh -= 1
2040 dh -= 1
2040 htext = ""
2041 htext = ""
2041 if dh:
2042 if dh:
2042 htext = _(" (%+d heads)") % dh
2043 htext = _(" (%+d heads)") % dh
2043
2044
2044 self.ui.status(_("added %d changesets"
2045 self.ui.status(_("added %d changesets"
2045 " with %d changes to %d files%s\n")
2046 " with %d changes to %d files%s\n")
2046 % (changesets, revisions, files, htext))
2047 % (changesets, revisions, files, htext))
2047
2048
2048 if changesets > 0:
2049 if changesets > 0:
2049 p = lambda: cl.writepending() and self.root or ""
2050 p = lambda: cl.writepending() and self.root or ""
2050 self.hook('pretxnchangegroup', throw=True,
2051 self.hook('pretxnchangegroup', throw=True,
2051 node=hex(cl.node(clstart)), source=srctype,
2052 node=hex(cl.node(clstart)), source=srctype,
2052 url=url, pending=p)
2053 url=url, pending=p)
2053
2054
2054 added = [cl.node(r) for r in xrange(clstart, clend)]
2055 added = [cl.node(r) for r in xrange(clstart, clend)]
2055 publishing = self.ui.configbool('phases', 'publish', True)
2056 publishing = self.ui.configbool('phases', 'publish', True)
2056 if publishing and srctype == 'push':
2057 if publishing and srctype == 'push':
2057 # Old server can not push the boundary themself.
2058 # Old server can not push the boundary themself.
2058 # This clause ensure pushed changeset are alway marked as public
2059 # This clause ensure pushed changeset are alway marked as public
2059 phases.advanceboundary(self, 0, added)
2060 phases.advanceboundary(self, 0, added)
2060 elif srctype != 'strip': # strip should not touch boundary at all
2061 elif srctype != 'strip': # strip should not touch boundary at all
2061 phases.retractboundary(self, 1, added)
2062 phases.retractboundary(self, 1, added)
2062
2063
2063 # make changelog see real files again
2064 # make changelog see real files again
2064 cl.finalize(trp)
2065 cl.finalize(trp)
2065
2066
2066 tr.close()
2067 tr.close()
2067
2068
2068 if changesets > 0:
2069 if changesets > 0:
2069 def runhooks():
2070 def runhooks():
2070 # forcefully update the on-disk branch cache
2071 # forcefully update the on-disk branch cache
2071 self.ui.debug("updating the branch cache\n")
2072 self.ui.debug("updating the branch cache\n")
2072 self.updatebranchcache()
2073 self.updatebranchcache()
2073 self.hook("changegroup", node=hex(cl.node(clstart)),
2074 self.hook("changegroup", node=hex(cl.node(clstart)),
2074 source=srctype, url=url)
2075 source=srctype, url=url)
2075
2076
2076 for n in added:
2077 for n in added:
2077 self.hook("incoming", node=hex(n), source=srctype,
2078 self.hook("incoming", node=hex(n), source=srctype,
2078 url=url)
2079 url=url)
2079 self._afterlock(runhooks)
2080 self._afterlock(runhooks)
2080
2081
2081 finally:
2082 finally:
2082 tr.release()
2083 tr.release()
2083 # never return 0 here:
2084 # never return 0 here:
2084 if dh < 0:
2085 if dh < 0:
2085 return dh - 1
2086 return dh - 1
2086 else:
2087 else:
2087 return dh + 1
2088 return dh + 1
2088
2089
2089 def stream_in(self, remote, requirements):
2090 def stream_in(self, remote, requirements):
2090 lock = self.lock()
2091 lock = self.lock()
2091 try:
2092 try:
2092 fp = remote.stream_out()
2093 fp = remote.stream_out()
2093 l = fp.readline()
2094 l = fp.readline()
2094 try:
2095 try:
2095 resp = int(l)
2096 resp = int(l)
2096 except ValueError:
2097 except ValueError:
2097 raise error.ResponseError(
2098 raise error.ResponseError(
2098 _('Unexpected response from remote server:'), l)
2099 _('Unexpected response from remote server:'), l)
2099 if resp == 1:
2100 if resp == 1:
2100 raise util.Abort(_('operation forbidden by server'))
2101 raise util.Abort(_('operation forbidden by server'))
2101 elif resp == 2:
2102 elif resp == 2:
2102 raise util.Abort(_('locking the remote repository failed'))
2103 raise util.Abort(_('locking the remote repository failed'))
2103 elif resp != 0:
2104 elif resp != 0:
2104 raise util.Abort(_('the server sent an unknown error code'))
2105 raise util.Abort(_('the server sent an unknown error code'))
2105 self.ui.status(_('streaming all changes\n'))
2106 self.ui.status(_('streaming all changes\n'))
2106 l = fp.readline()
2107 l = fp.readline()
2107 try:
2108 try:
2108 total_files, total_bytes = map(int, l.split(' ', 1))
2109 total_files, total_bytes = map(int, l.split(' ', 1))
2109 except (ValueError, TypeError):
2110 except (ValueError, TypeError):
2110 raise error.ResponseError(
2111 raise error.ResponseError(
2111 _('Unexpected response from remote server:'), l)
2112 _('Unexpected response from remote server:'), l)
2112 self.ui.status(_('%d files to transfer, %s of data\n') %
2113 self.ui.status(_('%d files to transfer, %s of data\n') %
2113 (total_files, util.bytecount(total_bytes)))
2114 (total_files, util.bytecount(total_bytes)))
2114 start = time.time()
2115 start = time.time()
2115 for i in xrange(total_files):
2116 for i in xrange(total_files):
2116 # XXX doesn't support '\n' or '\r' in filenames
2117 # XXX doesn't support '\n' or '\r' in filenames
2117 l = fp.readline()
2118 l = fp.readline()
2118 try:
2119 try:
2119 name, size = l.split('\0', 1)
2120 name, size = l.split('\0', 1)
2120 size = int(size)
2121 size = int(size)
2121 except (ValueError, TypeError):
2122 except (ValueError, TypeError):
2122 raise error.ResponseError(
2123 raise error.ResponseError(
2123 _('Unexpected response from remote server:'), l)
2124 _('Unexpected response from remote server:'), l)
2124 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2125 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2125 # for backwards compat, name was partially encoded
2126 # for backwards compat, name was partially encoded
2126 ofp = self.sopener(store.decodedir(name), 'w')
2127 ofp = self.sopener(store.decodedir(name), 'w')
2127 for chunk in util.filechunkiter(fp, limit=size):
2128 for chunk in util.filechunkiter(fp, limit=size):
2128 ofp.write(chunk)
2129 ofp.write(chunk)
2129 ofp.close()
2130 ofp.close()
2130 elapsed = time.time() - start
2131 elapsed = time.time() - start
2131 if elapsed <= 0:
2132 if elapsed <= 0:
2132 elapsed = 0.001
2133 elapsed = 0.001
2133 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2134 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2134 (util.bytecount(total_bytes), elapsed,
2135 (util.bytecount(total_bytes), elapsed,
2135 util.bytecount(total_bytes / elapsed)))
2136 util.bytecount(total_bytes / elapsed)))
2136
2137
2137 # new requirements = old non-format requirements + new format-related
2138 # new requirements = old non-format requirements + new format-related
2138 # requirements from the streamed-in repository
2139 # requirements from the streamed-in repository
2139 requirements.update(set(self.requirements) - self.supportedformats)
2140 requirements.update(set(self.requirements) - self.supportedformats)
2140 self._applyrequirements(requirements)
2141 self._applyrequirements(requirements)
2141 self._writerequirements()
2142 self._writerequirements()
2142
2143
2143 self.invalidate()
2144 self.invalidate()
2144 return len(self.heads()) + 1
2145 return len(self.heads()) + 1
2145 finally:
2146 finally:
2146 lock.release()
2147 lock.release()
2147
2148
2148 def clone(self, remote, heads=[], stream=False):
2149 def clone(self, remote, heads=[], stream=False):
2149 '''clone remote repository.
2150 '''clone remote repository.
2150
2151
2151 keyword arguments:
2152 keyword arguments:
2152 heads: list of revs to clone (forces use of pull)
2153 heads: list of revs to clone (forces use of pull)
2153 stream: use streaming clone if possible'''
2154 stream: use streaming clone if possible'''
2154
2155
2155 # now, all clients that can request uncompressed clones can
2156 # now, all clients that can request uncompressed clones can
2156 # read repo formats supported by all servers that can serve
2157 # read repo formats supported by all servers that can serve
2157 # them.
2158 # them.
2158
2159
2159 # if revlog format changes, client will have to check version
2160 # if revlog format changes, client will have to check version
2160 # and format flags on "stream" capability, and use
2161 # and format flags on "stream" capability, and use
2161 # uncompressed only if compatible.
2162 # uncompressed only if compatible.
2162
2163
2163 if stream and not heads:
2164 if stream and not heads:
2164 # 'stream' means remote revlog format is revlogv1 only
2165 # 'stream' means remote revlog format is revlogv1 only
2165 if remote.capable('stream'):
2166 if remote.capable('stream'):
2166 return self.stream_in(remote, set(('revlogv1',)))
2167 return self.stream_in(remote, set(('revlogv1',)))
2167 # otherwise, 'streamreqs' contains the remote revlog format
2168 # otherwise, 'streamreqs' contains the remote revlog format
2168 streamreqs = remote.capable('streamreqs')
2169 streamreqs = remote.capable('streamreqs')
2169 if streamreqs:
2170 if streamreqs:
2170 streamreqs = set(streamreqs.split(','))
2171 streamreqs = set(streamreqs.split(','))
2171 # if we support it, stream in and adjust our requirements
2172 # if we support it, stream in and adjust our requirements
2172 if not streamreqs - self.supportedformats:
2173 if not streamreqs - self.supportedformats:
2173 return self.stream_in(remote, streamreqs)
2174 return self.stream_in(remote, streamreqs)
2174 return self.pull(remote, heads)
2175 return self.pull(remote, heads)
2175
2176
2176 def pushkey(self, namespace, key, old, new):
2177 def pushkey(self, namespace, key, old, new):
2177 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2178 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2178 old=old, new=new)
2179 old=old, new=new)
2179 ret = pushkey.push(self, namespace, key, old, new)
2180 ret = pushkey.push(self, namespace, key, old, new)
2180 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2181 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2181 ret=ret)
2182 ret=ret)
2182 return ret
2183 return ret
2183
2184
2184 def listkeys(self, namespace):
2185 def listkeys(self, namespace):
2185 self.hook('prelistkeys', throw=True, namespace=namespace)
2186 self.hook('prelistkeys', throw=True, namespace=namespace)
2186 values = pushkey.list(self, namespace)
2187 values = pushkey.list(self, namespace)
2187 self.hook('listkeys', namespace=namespace, values=values)
2188 self.hook('listkeys', namespace=namespace, values=values)
2188 return values
2189 return values
2189
2190
2190 def debugwireargs(self, one, two, three=None, four=None, five=None):
2191 def debugwireargs(self, one, two, three=None, four=None, five=None):
2191 '''used to test argument passing over the wire'''
2192 '''used to test argument passing over the wire'''
2192 return "%s %s %s %s %s" % (one, two, three, four, five)
2193 return "%s %s %s %s %s" % (one, two, three, four, five)
2193
2194
2194 def savecommitmessage(self, text):
2195 def savecommitmessage(self, text):
2195 fp = self.opener('last-message.txt', 'wb')
2196 fp = self.opener('last-message.txt', 'wb')
2196 try:
2197 try:
2197 fp.write(text)
2198 fp.write(text)
2198 finally:
2199 finally:
2199 fp.close()
2200 fp.close()
2200 return self.pathto(fp.name[len(self.root)+1:])
2201 return self.pathto(fp.name[len(self.root)+1:])
2201
2202
2202 # used to avoid circular references so destructors work
2203 # used to avoid circular references so destructors work
2203 def aftertrans(files):
2204 def aftertrans(files):
2204 renamefiles = [tuple(t) for t in files]
2205 renamefiles = [tuple(t) for t in files]
2205 def a():
2206 def a():
2206 for src, dest in renamefiles:
2207 for src, dest in renamefiles:
2207 util.rename(src, dest)
2208 util.rename(src, dest)
2208 return a
2209 return a
2209
2210
2210 def undoname(fn):
2211 def undoname(fn):
2211 base, name = os.path.split(fn)
2212 base, name = os.path.split(fn)
2212 assert name.startswith('journal')
2213 assert name.startswith('journal')
2213 return os.path.join(base, name.replace('journal', 'undo', 1))
2214 return os.path.join(base, name.replace('journal', 'undo', 1))
2214
2215
2215 def instance(ui, path, create):
2216 def instance(ui, path, create):
2216 return localrepository(ui, util.urllocalpath(path), create)
2217 return localrepository(ui, util.urllocalpath(path), create)
2217
2218
2218 def islocal(path):
2219 def islocal(path):
2219 return True
2220 return True
@@ -1,813 +1,814
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, error, osutil, revset, similar, encoding
9 import util, error, osutil, revset, similar, encoding
10 import match as matchmod
10 import match as matchmod
11 import os, errno, re, stat, sys, glob
11 import os, errno, re, stat, sys, glob
12
12
13 def checkfilename(f):
13 def checkfilename(f):
14 '''Check that the filename f is an acceptable filename for a tracked file'''
14 '''Check that the filename f is an acceptable filename for a tracked file'''
15 if '\r' in f or '\n' in f:
15 if '\r' in f or '\n' in f:
16 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
16 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
17
17
18 def checkportable(ui, f):
18 def checkportable(ui, f):
19 '''Check if filename f is portable and warn or abort depending on config'''
19 '''Check if filename f is portable and warn or abort depending on config'''
20 checkfilename(f)
20 checkfilename(f)
21 abort, warn = checkportabilityalert(ui)
21 abort, warn = checkportabilityalert(ui)
22 if abort or warn:
22 if abort or warn:
23 msg = util.checkwinfilename(f)
23 msg = util.checkwinfilename(f)
24 if msg:
24 if msg:
25 msg = "%s: %r" % (msg, f)
25 msg = "%s: %r" % (msg, f)
26 if abort:
26 if abort:
27 raise util.Abort(msg)
27 raise util.Abort(msg)
28 ui.warn(_("warning: %s\n") % msg)
28 ui.warn(_("warning: %s\n") % msg)
29
29
30 def checkportabilityalert(ui):
30 def checkportabilityalert(ui):
31 '''check if the user's config requests nothing, a warning, or abort for
31 '''check if the user's config requests nothing, a warning, or abort for
32 non-portable filenames'''
32 non-portable filenames'''
33 val = ui.config('ui', 'portablefilenames', 'warn')
33 val = ui.config('ui', 'portablefilenames', 'warn')
34 lval = val.lower()
34 lval = val.lower()
35 bval = util.parsebool(val)
35 bval = util.parsebool(val)
36 abort = os.name == 'nt' or lval == 'abort'
36 abort = os.name == 'nt' or lval == 'abort'
37 warn = bval or lval == 'warn'
37 warn = bval or lval == 'warn'
38 if bval is None and not (warn or abort or lval == 'ignore'):
38 if bval is None and not (warn or abort or lval == 'ignore'):
39 raise error.ConfigError(
39 raise error.ConfigError(
40 _("ui.portablefilenames value is invalid ('%s')") % val)
40 _("ui.portablefilenames value is invalid ('%s')") % val)
41 return abort, warn
41 return abort, warn
42
42
43 class casecollisionauditor(object):
43 class casecollisionauditor(object):
44 def __init__(self, ui, abort, existingiter):
44 def __init__(self, ui, abort, existingiter):
45 self._ui = ui
45 self._ui = ui
46 self._abort = abort
46 self._abort = abort
47 self._map = {}
47 self._map = {}
48 for f in existingiter:
48 for f in existingiter:
49 self._map[encoding.lower(f)] = f
49 self._map[encoding.lower(f)] = f
50
50
51 def __call__(self, f):
51 def __call__(self, f):
52 fl = encoding.lower(f)
52 fl = encoding.lower(f)
53 map = self._map
53 map = self._map
54 if fl in map and map[fl] != f:
54 if fl in map and map[fl] != f:
55 msg = _('possible case-folding collision for %s') % f
55 msg = _('possible case-folding collision for %s') % f
56 if self._abort:
56 if self._abort:
57 raise util.Abort(msg)
57 raise util.Abort(msg)
58 self._ui.warn(_("warning: %s\n") % msg)
58 self._ui.warn(_("warning: %s\n") % msg)
59 map[fl] = f
59 map[fl] = f
60
60
61 class pathauditor(object):
61 class pathauditor(object):
62 '''ensure that a filesystem path contains no banned components.
62 '''ensure that a filesystem path contains no banned components.
63 the following properties of a path are checked:
63 the following properties of a path are checked:
64
64
65 - ends with a directory separator
65 - ends with a directory separator
66 - under top-level .hg
66 - under top-level .hg
67 - starts at the root of a windows drive
67 - starts at the root of a windows drive
68 - contains ".."
68 - contains ".."
69 - traverses a symlink (e.g. a/symlink_here/b)
69 - traverses a symlink (e.g. a/symlink_here/b)
70 - inside a nested repository (a callback can be used to approve
70 - inside a nested repository (a callback can be used to approve
71 some nested repositories, e.g., subrepositories)
71 some nested repositories, e.g., subrepositories)
72 '''
72 '''
73
73
74 def __init__(self, root, callback=None):
74 def __init__(self, root, callback=None):
75 self.audited = set()
75 self.audited = set()
76 self.auditeddir = set()
76 self.auditeddir = set()
77 self.root = root
77 self.root = root
78 self.callback = callback
78 self.callback = callback
79 if os.path.lexists(root) and not util.checkcase(root):
79 if os.path.lexists(root) and not util.checkcase(root):
80 self.normcase = util.normcase
80 self.normcase = util.normcase
81 else:
81 else:
82 self.normcase = lambda x: x
82 self.normcase = lambda x: x
83
83
84 def __call__(self, path):
84 def __call__(self, path):
85 '''Check the relative path.
85 '''Check the relative path.
86 path may contain a pattern (e.g. foodir/**.txt)'''
86 path may contain a pattern (e.g. foodir/**.txt)'''
87
87
88 path = util.localpath(path)
88 normpath = self.normcase(path)
89 normpath = self.normcase(path)
89 if normpath in self.audited:
90 if normpath in self.audited:
90 return
91 return
91 # AIX ignores "/" at end of path, others raise EISDIR.
92 # AIX ignores "/" at end of path, others raise EISDIR.
92 if util.endswithsep(path):
93 if util.endswithsep(path):
93 raise util.Abort(_("path ends in directory separator: %s") % path)
94 raise util.Abort(_("path ends in directory separator: %s") % path)
94 parts = util.splitpath(path)
95 parts = util.splitpath(path)
95 if (os.path.splitdrive(path)[0]
96 if (os.path.splitdrive(path)[0]
96 or parts[0].lower() in ('.hg', '.hg.', '')
97 or parts[0].lower() in ('.hg', '.hg.', '')
97 or os.pardir in parts):
98 or os.pardir in parts):
98 raise util.Abort(_("path contains illegal component: %s") % path)
99 raise util.Abort(_("path contains illegal component: %s") % path)
99 if '.hg' in path.lower():
100 if '.hg' in path.lower():
100 lparts = [p.lower() for p in parts]
101 lparts = [p.lower() for p in parts]
101 for p in '.hg', '.hg.':
102 for p in '.hg', '.hg.':
102 if p in lparts[1:]:
103 if p in lparts[1:]:
103 pos = lparts.index(p)
104 pos = lparts.index(p)
104 base = os.path.join(*parts[:pos])
105 base = os.path.join(*parts[:pos])
105 raise util.Abort(_("path '%s' is inside nested repo %r")
106 raise util.Abort(_("path '%s' is inside nested repo %r")
106 % (path, base))
107 % (path, base))
107
108
108 normparts = util.splitpath(normpath)
109 normparts = util.splitpath(normpath)
109 assert len(parts) == len(normparts)
110 assert len(parts) == len(normparts)
110
111
111 parts.pop()
112 parts.pop()
112 normparts.pop()
113 normparts.pop()
113 prefixes = []
114 prefixes = []
114 while parts:
115 while parts:
115 prefix = os.sep.join(parts)
116 prefix = os.sep.join(parts)
116 normprefix = os.sep.join(normparts)
117 normprefix = os.sep.join(normparts)
117 if normprefix in self.auditeddir:
118 if normprefix in self.auditeddir:
118 break
119 break
119 curpath = os.path.join(self.root, prefix)
120 curpath = os.path.join(self.root, prefix)
120 try:
121 try:
121 st = os.lstat(curpath)
122 st = os.lstat(curpath)
122 except OSError, err:
123 except OSError, err:
123 # EINVAL can be raised as invalid path syntax under win32.
124 # EINVAL can be raised as invalid path syntax under win32.
124 # They must be ignored for patterns can be checked too.
125 # They must be ignored for patterns can be checked too.
125 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
126 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
126 raise
127 raise
127 else:
128 else:
128 if stat.S_ISLNK(st.st_mode):
129 if stat.S_ISLNK(st.st_mode):
129 raise util.Abort(
130 raise util.Abort(
130 _('path %r traverses symbolic link %r')
131 _('path %r traverses symbolic link %r')
131 % (path, prefix))
132 % (path, prefix))
132 elif (stat.S_ISDIR(st.st_mode) and
133 elif (stat.S_ISDIR(st.st_mode) and
133 os.path.isdir(os.path.join(curpath, '.hg'))):
134 os.path.isdir(os.path.join(curpath, '.hg'))):
134 if not self.callback or not self.callback(curpath):
135 if not self.callback or not self.callback(curpath):
135 raise util.Abort(_("path '%s' is inside nested repo %r") %
136 raise util.Abort(_("path '%s' is inside nested repo %r") %
136 (path, prefix))
137 (path, prefix))
137 prefixes.append(normprefix)
138 prefixes.append(normprefix)
138 parts.pop()
139 parts.pop()
139 normparts.pop()
140 normparts.pop()
140
141
141 self.audited.add(normpath)
142 self.audited.add(normpath)
142 # only add prefixes to the cache after checking everything: we don't
143 # only add prefixes to the cache after checking everything: we don't
143 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
144 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
144 self.auditeddir.update(prefixes)
145 self.auditeddir.update(prefixes)
145
146
146 class abstractopener(object):
147 class abstractopener(object):
147 """Abstract base class; cannot be instantiated"""
148 """Abstract base class; cannot be instantiated"""
148
149
149 def __init__(self, *args, **kwargs):
150 def __init__(self, *args, **kwargs):
150 '''Prevent instantiation; don't call this from subclasses.'''
151 '''Prevent instantiation; don't call this from subclasses.'''
151 raise NotImplementedError('attempted instantiating ' + str(type(self)))
152 raise NotImplementedError('attempted instantiating ' + str(type(self)))
152
153
153 def read(self, path):
154 def read(self, path):
154 fp = self(path, 'rb')
155 fp = self(path, 'rb')
155 try:
156 try:
156 return fp.read()
157 return fp.read()
157 finally:
158 finally:
158 fp.close()
159 fp.close()
159
160
160 def write(self, path, data):
161 def write(self, path, data):
161 fp = self(path, 'wb')
162 fp = self(path, 'wb')
162 try:
163 try:
163 return fp.write(data)
164 return fp.write(data)
164 finally:
165 finally:
165 fp.close()
166 fp.close()
166
167
167 def append(self, path, data):
168 def append(self, path, data):
168 fp = self(path, 'ab')
169 fp = self(path, 'ab')
169 try:
170 try:
170 return fp.write(data)
171 return fp.write(data)
171 finally:
172 finally:
172 fp.close()
173 fp.close()
173
174
174 class opener(abstractopener):
175 class opener(abstractopener):
175 '''Open files relative to a base directory
176 '''Open files relative to a base directory
176
177
177 This class is used to hide the details of COW semantics and
178 This class is used to hide the details of COW semantics and
178 remote file access from higher level code.
179 remote file access from higher level code.
179 '''
180 '''
180 def __init__(self, base, audit=True):
181 def __init__(self, base, audit=True):
181 self.base = base
182 self.base = base
182 self._audit = audit
183 self._audit = audit
183 if audit:
184 if audit:
184 self.auditor = pathauditor(base)
185 self.auditor = pathauditor(base)
185 else:
186 else:
186 self.auditor = util.always
187 self.auditor = util.always
187 self.createmode = None
188 self.createmode = None
188 self._trustnlink = None
189 self._trustnlink = None
189
190
190 @util.propertycache
191 @util.propertycache
191 def _cansymlink(self):
192 def _cansymlink(self):
192 return util.checklink(self.base)
193 return util.checklink(self.base)
193
194
194 def _fixfilemode(self, name):
195 def _fixfilemode(self, name):
195 if self.createmode is None:
196 if self.createmode is None:
196 return
197 return
197 os.chmod(name, self.createmode & 0666)
198 os.chmod(name, self.createmode & 0666)
198
199
199 def __call__(self, path, mode="r", text=False, atomictemp=False):
200 def __call__(self, path, mode="r", text=False, atomictemp=False):
200 if self._audit:
201 if self._audit:
201 r = util.checkosfilename(path)
202 r = util.checkosfilename(path)
202 if r:
203 if r:
203 raise util.Abort("%s: %r" % (r, path))
204 raise util.Abort("%s: %r" % (r, path))
204 self.auditor(path)
205 self.auditor(path)
205 f = os.path.join(self.base, path)
206 f = os.path.join(self.base, path)
206
207
207 if not text and "b" not in mode:
208 if not text and "b" not in mode:
208 mode += "b" # for that other OS
209 mode += "b" # for that other OS
209
210
210 nlink = -1
211 nlink = -1
211 dirname, basename = os.path.split(f)
212 dirname, basename = os.path.split(f)
212 # If basename is empty, then the path is malformed because it points
213 # If basename is empty, then the path is malformed because it points
213 # to a directory. Let the posixfile() call below raise IOError.
214 # to a directory. Let the posixfile() call below raise IOError.
214 if basename and mode not in ('r', 'rb'):
215 if basename and mode not in ('r', 'rb'):
215 if atomictemp:
216 if atomictemp:
216 if not os.path.isdir(dirname):
217 if not os.path.isdir(dirname):
217 util.makedirs(dirname, self.createmode)
218 util.makedirs(dirname, self.createmode)
218 return util.atomictempfile(f, mode, self.createmode)
219 return util.atomictempfile(f, mode, self.createmode)
219 try:
220 try:
220 if 'w' in mode:
221 if 'w' in mode:
221 util.unlink(f)
222 util.unlink(f)
222 nlink = 0
223 nlink = 0
223 else:
224 else:
224 # nlinks() may behave differently for files on Windows
225 # nlinks() may behave differently for files on Windows
225 # shares if the file is open.
226 # shares if the file is open.
226 fd = util.posixfile(f)
227 fd = util.posixfile(f)
227 nlink = util.nlinks(f)
228 nlink = util.nlinks(f)
228 if nlink < 1:
229 if nlink < 1:
229 nlink = 2 # force mktempcopy (issue1922)
230 nlink = 2 # force mktempcopy (issue1922)
230 fd.close()
231 fd.close()
231 except (OSError, IOError), e:
232 except (OSError, IOError), e:
232 if e.errno != errno.ENOENT:
233 if e.errno != errno.ENOENT:
233 raise
234 raise
234 nlink = 0
235 nlink = 0
235 if not os.path.isdir(dirname):
236 if not os.path.isdir(dirname):
236 util.makedirs(dirname, self.createmode)
237 util.makedirs(dirname, self.createmode)
237 if nlink > 0:
238 if nlink > 0:
238 if self._trustnlink is None:
239 if self._trustnlink is None:
239 self._trustnlink = nlink > 1 or util.checknlink(f)
240 self._trustnlink = nlink > 1 or util.checknlink(f)
240 if nlink > 1 or not self._trustnlink:
241 if nlink > 1 or not self._trustnlink:
241 util.rename(util.mktempcopy(f), f)
242 util.rename(util.mktempcopy(f), f)
242 fp = util.posixfile(f, mode)
243 fp = util.posixfile(f, mode)
243 if nlink == 0:
244 if nlink == 0:
244 self._fixfilemode(f)
245 self._fixfilemode(f)
245 return fp
246 return fp
246
247
247 def symlink(self, src, dst):
248 def symlink(self, src, dst):
248 self.auditor(dst)
249 self.auditor(dst)
249 linkname = os.path.join(self.base, dst)
250 linkname = os.path.join(self.base, dst)
250 try:
251 try:
251 os.unlink(linkname)
252 os.unlink(linkname)
252 except OSError:
253 except OSError:
253 pass
254 pass
254
255
255 dirname = os.path.dirname(linkname)
256 dirname = os.path.dirname(linkname)
256 if not os.path.exists(dirname):
257 if not os.path.exists(dirname):
257 util.makedirs(dirname, self.createmode)
258 util.makedirs(dirname, self.createmode)
258
259
259 if self._cansymlink:
260 if self._cansymlink:
260 try:
261 try:
261 os.symlink(src, linkname)
262 os.symlink(src, linkname)
262 except OSError, err:
263 except OSError, err:
263 raise OSError(err.errno, _('could not symlink to %r: %s') %
264 raise OSError(err.errno, _('could not symlink to %r: %s') %
264 (src, err.strerror), linkname)
265 (src, err.strerror), linkname)
265 else:
266 else:
266 f = self(dst, "w")
267 f = self(dst, "w")
267 f.write(src)
268 f.write(src)
268 f.close()
269 f.close()
269 self._fixfilemode(dst)
270 self._fixfilemode(dst)
270
271
271 def audit(self, path):
272 def audit(self, path):
272 self.auditor(path)
273 self.auditor(path)
273
274
274 class filteropener(abstractopener):
275 class filteropener(abstractopener):
275 '''Wrapper opener for filtering filenames with a function.'''
276 '''Wrapper opener for filtering filenames with a function.'''
276
277
277 def __init__(self, opener, filter):
278 def __init__(self, opener, filter):
278 self._filter = filter
279 self._filter = filter
279 self._orig = opener
280 self._orig = opener
280
281
281 def __call__(self, path, *args, **kwargs):
282 def __call__(self, path, *args, **kwargs):
282 return self._orig(self._filter(path), *args, **kwargs)
283 return self._orig(self._filter(path), *args, **kwargs)
283
284
284 def canonpath(root, cwd, myname, auditor=None):
285 def canonpath(root, cwd, myname, auditor=None):
285 '''return the canonical path of myname, given cwd and root'''
286 '''return the canonical path of myname, given cwd and root'''
286 if util.endswithsep(root):
287 if util.endswithsep(root):
287 rootsep = root
288 rootsep = root
288 else:
289 else:
289 rootsep = root + os.sep
290 rootsep = root + os.sep
290 name = myname
291 name = myname
291 if not os.path.isabs(name):
292 if not os.path.isabs(name):
292 name = os.path.join(root, cwd, name)
293 name = os.path.join(root, cwd, name)
293 name = os.path.normpath(name)
294 name = os.path.normpath(name)
294 if auditor is None:
295 if auditor is None:
295 auditor = pathauditor(root)
296 auditor = pathauditor(root)
296 if name != rootsep and name.startswith(rootsep):
297 if name != rootsep and name.startswith(rootsep):
297 name = name[len(rootsep):]
298 name = name[len(rootsep):]
298 auditor(name)
299 auditor(name)
299 return util.pconvert(name)
300 return util.pconvert(name)
300 elif name == root:
301 elif name == root:
301 return ''
302 return ''
302 else:
303 else:
303 # Determine whether `name' is in the hierarchy at or beneath `root',
304 # Determine whether `name' is in the hierarchy at or beneath `root',
304 # by iterating name=dirname(name) until that causes no change (can't
305 # by iterating name=dirname(name) until that causes no change (can't
305 # check name == '/', because that doesn't work on windows). For each
306 # check name == '/', because that doesn't work on windows). For each
306 # `name', compare dev/inode numbers. If they match, the list `rel'
307 # `name', compare dev/inode numbers. If they match, the list `rel'
307 # holds the reversed list of components making up the relative file
308 # holds the reversed list of components making up the relative file
308 # name we want.
309 # name we want.
309 root_st = os.stat(root)
310 root_st = os.stat(root)
310 rel = []
311 rel = []
311 while True:
312 while True:
312 try:
313 try:
313 name_st = os.stat(name)
314 name_st = os.stat(name)
314 except OSError:
315 except OSError:
315 break
316 break
316 if util.samestat(name_st, root_st):
317 if util.samestat(name_st, root_st):
317 if not rel:
318 if not rel:
318 # name was actually the same as root (maybe a symlink)
319 # name was actually the same as root (maybe a symlink)
319 return ''
320 return ''
320 rel.reverse()
321 rel.reverse()
321 name = os.path.join(*rel)
322 name = os.path.join(*rel)
322 auditor(name)
323 auditor(name)
323 return util.pconvert(name)
324 return util.pconvert(name)
324 dirname, basename = os.path.split(name)
325 dirname, basename = os.path.split(name)
325 rel.append(basename)
326 rel.append(basename)
326 if dirname == name:
327 if dirname == name:
327 break
328 break
328 name = dirname
329 name = dirname
329
330
330 raise util.Abort('%s not under root' % myname)
331 raise util.Abort('%s not under root' % myname)
331
332
332 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
333 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
333 '''yield every hg repository under path, recursively.'''
334 '''yield every hg repository under path, recursively.'''
334 def errhandler(err):
335 def errhandler(err):
335 if err.filename == path:
336 if err.filename == path:
336 raise err
337 raise err
337 samestat = getattr(os.path, 'samestat', None)
338 samestat = getattr(os.path, 'samestat', None)
338 if followsym and samestat is not None:
339 if followsym and samestat is not None:
339 def adddir(dirlst, dirname):
340 def adddir(dirlst, dirname):
340 match = False
341 match = False
341 dirstat = os.stat(dirname)
342 dirstat = os.stat(dirname)
342 for lstdirstat in dirlst:
343 for lstdirstat in dirlst:
343 if samestat(dirstat, lstdirstat):
344 if samestat(dirstat, lstdirstat):
344 match = True
345 match = True
345 break
346 break
346 if not match:
347 if not match:
347 dirlst.append(dirstat)
348 dirlst.append(dirstat)
348 return not match
349 return not match
349 else:
350 else:
350 followsym = False
351 followsym = False
351
352
352 if (seen_dirs is None) and followsym:
353 if (seen_dirs is None) and followsym:
353 seen_dirs = []
354 seen_dirs = []
354 adddir(seen_dirs, path)
355 adddir(seen_dirs, path)
355 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
356 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
356 dirs.sort()
357 dirs.sort()
357 if '.hg' in dirs:
358 if '.hg' in dirs:
358 yield root # found a repository
359 yield root # found a repository
359 qroot = os.path.join(root, '.hg', 'patches')
360 qroot = os.path.join(root, '.hg', 'patches')
360 if os.path.isdir(os.path.join(qroot, '.hg')):
361 if os.path.isdir(os.path.join(qroot, '.hg')):
361 yield qroot # we have a patch queue repo here
362 yield qroot # we have a patch queue repo here
362 if recurse:
363 if recurse:
363 # avoid recursing inside the .hg directory
364 # avoid recursing inside the .hg directory
364 dirs.remove('.hg')
365 dirs.remove('.hg')
365 else:
366 else:
366 dirs[:] = [] # don't descend further
367 dirs[:] = [] # don't descend further
367 elif followsym:
368 elif followsym:
368 newdirs = []
369 newdirs = []
369 for d in dirs:
370 for d in dirs:
370 fname = os.path.join(root, d)
371 fname = os.path.join(root, d)
371 if adddir(seen_dirs, fname):
372 if adddir(seen_dirs, fname):
372 if os.path.islink(fname):
373 if os.path.islink(fname):
373 for hgname in walkrepos(fname, True, seen_dirs):
374 for hgname in walkrepos(fname, True, seen_dirs):
374 yield hgname
375 yield hgname
375 else:
376 else:
376 newdirs.append(d)
377 newdirs.append(d)
377 dirs[:] = newdirs
378 dirs[:] = newdirs
378
379
379 def osrcpath():
380 def osrcpath():
380 '''return default os-specific hgrc search path'''
381 '''return default os-specific hgrc search path'''
381 path = systemrcpath()
382 path = systemrcpath()
382 path.extend(userrcpath())
383 path.extend(userrcpath())
383 path = [os.path.normpath(f) for f in path]
384 path = [os.path.normpath(f) for f in path]
384 return path
385 return path
385
386
386 _rcpath = None
387 _rcpath = None
387
388
388 def rcpath():
389 def rcpath():
389 '''return hgrc search path. if env var HGRCPATH is set, use it.
390 '''return hgrc search path. if env var HGRCPATH is set, use it.
390 for each item in path, if directory, use files ending in .rc,
391 for each item in path, if directory, use files ending in .rc,
391 else use item.
392 else use item.
392 make HGRCPATH empty to only look in .hg/hgrc of current repo.
393 make HGRCPATH empty to only look in .hg/hgrc of current repo.
393 if no HGRCPATH, use default os-specific path.'''
394 if no HGRCPATH, use default os-specific path.'''
394 global _rcpath
395 global _rcpath
395 if _rcpath is None:
396 if _rcpath is None:
396 if 'HGRCPATH' in os.environ:
397 if 'HGRCPATH' in os.environ:
397 _rcpath = []
398 _rcpath = []
398 for p in os.environ['HGRCPATH'].split(os.pathsep):
399 for p in os.environ['HGRCPATH'].split(os.pathsep):
399 if not p:
400 if not p:
400 continue
401 continue
401 p = util.expandpath(p)
402 p = util.expandpath(p)
402 if os.path.isdir(p):
403 if os.path.isdir(p):
403 for f, kind in osutil.listdir(p):
404 for f, kind in osutil.listdir(p):
404 if f.endswith('.rc'):
405 if f.endswith('.rc'):
405 _rcpath.append(os.path.join(p, f))
406 _rcpath.append(os.path.join(p, f))
406 else:
407 else:
407 _rcpath.append(p)
408 _rcpath.append(p)
408 else:
409 else:
409 _rcpath = osrcpath()
410 _rcpath = osrcpath()
410 return _rcpath
411 return _rcpath
411
412
412 if os.name != 'nt':
413 if os.name != 'nt':
413
414
414 def rcfiles(path):
415 def rcfiles(path):
415 rcs = [os.path.join(path, 'hgrc')]
416 rcs = [os.path.join(path, 'hgrc')]
416 rcdir = os.path.join(path, 'hgrc.d')
417 rcdir = os.path.join(path, 'hgrc.d')
417 try:
418 try:
418 rcs.extend([os.path.join(rcdir, f)
419 rcs.extend([os.path.join(rcdir, f)
419 for f, kind in osutil.listdir(rcdir)
420 for f, kind in osutil.listdir(rcdir)
420 if f.endswith(".rc")])
421 if f.endswith(".rc")])
421 except OSError:
422 except OSError:
422 pass
423 pass
423 return rcs
424 return rcs
424
425
425 def systemrcpath():
426 def systemrcpath():
426 path = []
427 path = []
427 # old mod_python does not set sys.argv
428 # old mod_python does not set sys.argv
428 if len(getattr(sys, 'argv', [])) > 0:
429 if len(getattr(sys, 'argv', [])) > 0:
429 p = os.path.dirname(os.path.dirname(sys.argv[0]))
430 p = os.path.dirname(os.path.dirname(sys.argv[0]))
430 path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
431 path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
431 path.extend(rcfiles('/etc/mercurial'))
432 path.extend(rcfiles('/etc/mercurial'))
432 return path
433 return path
433
434
434 def userrcpath():
435 def userrcpath():
435 return [os.path.expanduser('~/.hgrc')]
436 return [os.path.expanduser('~/.hgrc')]
436
437
437 else:
438 else:
438
439
439 _HKEY_LOCAL_MACHINE = 0x80000002L
440 _HKEY_LOCAL_MACHINE = 0x80000002L
440
441
441 def systemrcpath():
442 def systemrcpath():
442 '''return default os-specific hgrc search path'''
443 '''return default os-specific hgrc search path'''
443 rcpath = []
444 rcpath = []
444 filename = util.executablepath()
445 filename = util.executablepath()
445 # Use mercurial.ini found in directory with hg.exe
446 # Use mercurial.ini found in directory with hg.exe
446 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
447 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
447 if os.path.isfile(progrc):
448 if os.path.isfile(progrc):
448 rcpath.append(progrc)
449 rcpath.append(progrc)
449 return rcpath
450 return rcpath
450 # Use hgrc.d found in directory with hg.exe
451 # Use hgrc.d found in directory with hg.exe
451 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
452 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
452 if os.path.isdir(progrcd):
453 if os.path.isdir(progrcd):
453 for f, kind in osutil.listdir(progrcd):
454 for f, kind in osutil.listdir(progrcd):
454 if f.endswith('.rc'):
455 if f.endswith('.rc'):
455 rcpath.append(os.path.join(progrcd, f))
456 rcpath.append(os.path.join(progrcd, f))
456 return rcpath
457 return rcpath
457 # else look for a system rcpath in the registry
458 # else look for a system rcpath in the registry
458 value = util.lookupreg('SOFTWARE\\Mercurial', None,
459 value = util.lookupreg('SOFTWARE\\Mercurial', None,
459 _HKEY_LOCAL_MACHINE)
460 _HKEY_LOCAL_MACHINE)
460 if not isinstance(value, str) or not value:
461 if not isinstance(value, str) or not value:
461 return rcpath
462 return rcpath
462 value = value.replace('/', os.sep)
463 value = value.replace('/', os.sep)
463 for p in value.split(os.pathsep):
464 for p in value.split(os.pathsep):
464 if p.lower().endswith('mercurial.ini'):
465 if p.lower().endswith('mercurial.ini'):
465 rcpath.append(p)
466 rcpath.append(p)
466 elif os.path.isdir(p):
467 elif os.path.isdir(p):
467 for f, kind in osutil.listdir(p):
468 for f, kind in osutil.listdir(p):
468 if f.endswith('.rc'):
469 if f.endswith('.rc'):
469 rcpath.append(os.path.join(p, f))
470 rcpath.append(os.path.join(p, f))
470 return rcpath
471 return rcpath
471
472
472 def userrcpath():
473 def userrcpath():
473 '''return os-specific hgrc search path to the user dir'''
474 '''return os-specific hgrc search path to the user dir'''
474 home = os.path.expanduser('~')
475 home = os.path.expanduser('~')
475 path = [os.path.join(home, 'mercurial.ini'),
476 path = [os.path.join(home, 'mercurial.ini'),
476 os.path.join(home, '.hgrc')]
477 os.path.join(home, '.hgrc')]
477 userprofile = os.environ.get('USERPROFILE')
478 userprofile = os.environ.get('USERPROFILE')
478 if userprofile:
479 if userprofile:
479 path.append(os.path.join(userprofile, 'mercurial.ini'))
480 path.append(os.path.join(userprofile, 'mercurial.ini'))
480 path.append(os.path.join(userprofile, '.hgrc'))
481 path.append(os.path.join(userprofile, '.hgrc'))
481 return path
482 return path
482
483
483 def revsingle(repo, revspec, default='.'):
484 def revsingle(repo, revspec, default='.'):
484 if not revspec:
485 if not revspec:
485 return repo[default]
486 return repo[default]
486
487
487 l = revrange(repo, [revspec])
488 l = revrange(repo, [revspec])
488 if len(l) < 1:
489 if len(l) < 1:
489 raise util.Abort(_('empty revision set'))
490 raise util.Abort(_('empty revision set'))
490 return repo[l[-1]]
491 return repo[l[-1]]
491
492
492 def revpair(repo, revs):
493 def revpair(repo, revs):
493 if not revs:
494 if not revs:
494 return repo.dirstate.p1(), None
495 return repo.dirstate.p1(), None
495
496
496 l = revrange(repo, revs)
497 l = revrange(repo, revs)
497
498
498 if len(l) == 0:
499 if len(l) == 0:
499 return repo.dirstate.p1(), None
500 return repo.dirstate.p1(), None
500
501
501 if len(l) == 1:
502 if len(l) == 1:
502 return repo.lookup(l[0]), None
503 return repo.lookup(l[0]), None
503
504
504 return repo.lookup(l[0]), repo.lookup(l[-1])
505 return repo.lookup(l[0]), repo.lookup(l[-1])
505
506
506 _revrangesep = ':'
507 _revrangesep = ':'
507
508
508 def revrange(repo, revs):
509 def revrange(repo, revs):
509 """Yield revision as strings from a list of revision specifications."""
510 """Yield revision as strings from a list of revision specifications."""
510
511
511 def revfix(repo, val, defval):
512 def revfix(repo, val, defval):
512 if not val and val != 0 and defval is not None:
513 if not val and val != 0 and defval is not None:
513 return defval
514 return defval
514 return repo.changelog.rev(repo.lookup(val))
515 return repo.changelog.rev(repo.lookup(val))
515
516
516 seen, l = set(), []
517 seen, l = set(), []
517 for spec in revs:
518 for spec in revs:
518 # attempt to parse old-style ranges first to deal with
519 # attempt to parse old-style ranges first to deal with
519 # things like old-tag which contain query metacharacters
520 # things like old-tag which contain query metacharacters
520 try:
521 try:
521 if isinstance(spec, int):
522 if isinstance(spec, int):
522 seen.add(spec)
523 seen.add(spec)
523 l.append(spec)
524 l.append(spec)
524 continue
525 continue
525
526
526 if _revrangesep in spec:
527 if _revrangesep in spec:
527 start, end = spec.split(_revrangesep, 1)
528 start, end = spec.split(_revrangesep, 1)
528 start = revfix(repo, start, 0)
529 start = revfix(repo, start, 0)
529 end = revfix(repo, end, len(repo) - 1)
530 end = revfix(repo, end, len(repo) - 1)
530 step = start > end and -1 or 1
531 step = start > end and -1 or 1
531 for rev in xrange(start, end + step, step):
532 for rev in xrange(start, end + step, step):
532 if rev in seen:
533 if rev in seen:
533 continue
534 continue
534 seen.add(rev)
535 seen.add(rev)
535 l.append(rev)
536 l.append(rev)
536 continue
537 continue
537 elif spec and spec in repo: # single unquoted rev
538 elif spec and spec in repo: # single unquoted rev
538 rev = revfix(repo, spec, None)
539 rev = revfix(repo, spec, None)
539 if rev in seen:
540 if rev in seen:
540 continue
541 continue
541 seen.add(rev)
542 seen.add(rev)
542 l.append(rev)
543 l.append(rev)
543 continue
544 continue
544 except error.RepoLookupError:
545 except error.RepoLookupError:
545 pass
546 pass
546
547
547 # fall through to new-style queries if old-style fails
548 # fall through to new-style queries if old-style fails
548 m = revset.match(repo.ui, spec)
549 m = revset.match(repo.ui, spec)
549 for r in m(repo, range(len(repo))):
550 for r in m(repo, range(len(repo))):
550 if r not in seen:
551 if r not in seen:
551 l.append(r)
552 l.append(r)
552 seen.update(l)
553 seen.update(l)
553
554
554 return l
555 return l
555
556
556 def expandpats(pats):
557 def expandpats(pats):
557 if not util.expandglobs:
558 if not util.expandglobs:
558 return list(pats)
559 return list(pats)
559 ret = []
560 ret = []
560 for p in pats:
561 for p in pats:
561 kind, name = matchmod._patsplit(p, None)
562 kind, name = matchmod._patsplit(p, None)
562 if kind is None:
563 if kind is None:
563 try:
564 try:
564 globbed = glob.glob(name)
565 globbed = glob.glob(name)
565 except re.error:
566 except re.error:
566 globbed = [name]
567 globbed = [name]
567 if globbed:
568 if globbed:
568 ret.extend(globbed)
569 ret.extend(globbed)
569 continue
570 continue
570 ret.append(p)
571 ret.append(p)
571 return ret
572 return ret
572
573
573 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
574 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
574 if pats == ("",):
575 if pats == ("",):
575 pats = []
576 pats = []
576 if not globbed and default == 'relpath':
577 if not globbed and default == 'relpath':
577 pats = expandpats(pats or [])
578 pats = expandpats(pats or [])
578
579
579 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
580 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
580 default)
581 default)
581 def badfn(f, msg):
582 def badfn(f, msg):
582 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
583 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
583 m.bad = badfn
584 m.bad = badfn
584 return m
585 return m
585
586
586 def matchall(repo):
587 def matchall(repo):
587 return matchmod.always(repo.root, repo.getcwd())
588 return matchmod.always(repo.root, repo.getcwd())
588
589
589 def matchfiles(repo, files):
590 def matchfiles(repo, files):
590 return matchmod.exact(repo.root, repo.getcwd(), files)
591 return matchmod.exact(repo.root, repo.getcwd(), files)
591
592
592 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
593 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
593 if dry_run is None:
594 if dry_run is None:
594 dry_run = opts.get('dry_run')
595 dry_run = opts.get('dry_run')
595 if similarity is None:
596 if similarity is None:
596 similarity = float(opts.get('similarity') or 0)
597 similarity = float(opts.get('similarity') or 0)
597 # we'd use status here, except handling of symlinks and ignore is tricky
598 # we'd use status here, except handling of symlinks and ignore is tricky
598 added, unknown, deleted, removed = [], [], [], []
599 added, unknown, deleted, removed = [], [], [], []
599 audit_path = pathauditor(repo.root)
600 audit_path = pathauditor(repo.root)
600 m = match(repo[None], pats, opts)
601 m = match(repo[None], pats, opts)
601 for abs in repo.walk(m):
602 for abs in repo.walk(m):
602 target = repo.wjoin(abs)
603 target = repo.wjoin(abs)
603 good = True
604 good = True
604 try:
605 try:
605 audit_path(abs)
606 audit_path(abs)
606 except (OSError, util.Abort):
607 except (OSError, util.Abort):
607 good = False
608 good = False
608 rel = m.rel(abs)
609 rel = m.rel(abs)
609 exact = m.exact(abs)
610 exact = m.exact(abs)
610 if good and abs not in repo.dirstate:
611 if good and abs not in repo.dirstate:
611 unknown.append(abs)
612 unknown.append(abs)
612 if repo.ui.verbose or not exact:
613 if repo.ui.verbose or not exact:
613 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
614 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
614 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
615 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
615 or (os.path.isdir(target) and not os.path.islink(target))):
616 or (os.path.isdir(target) and not os.path.islink(target))):
616 deleted.append(abs)
617 deleted.append(abs)
617 if repo.ui.verbose or not exact:
618 if repo.ui.verbose or not exact:
618 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
619 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
619 # for finding renames
620 # for finding renames
620 elif repo.dirstate[abs] == 'r':
621 elif repo.dirstate[abs] == 'r':
621 removed.append(abs)
622 removed.append(abs)
622 elif repo.dirstate[abs] == 'a':
623 elif repo.dirstate[abs] == 'a':
623 added.append(abs)
624 added.append(abs)
624 copies = {}
625 copies = {}
625 if similarity > 0:
626 if similarity > 0:
626 for old, new, score in similar.findrenames(repo,
627 for old, new, score in similar.findrenames(repo,
627 added + unknown, removed + deleted, similarity):
628 added + unknown, removed + deleted, similarity):
628 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
629 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
629 repo.ui.status(_('recording removal of %s as rename to %s '
630 repo.ui.status(_('recording removal of %s as rename to %s '
630 '(%d%% similar)\n') %
631 '(%d%% similar)\n') %
631 (m.rel(old), m.rel(new), score * 100))
632 (m.rel(old), m.rel(new), score * 100))
632 copies[new] = old
633 copies[new] = old
633
634
634 if not dry_run:
635 if not dry_run:
635 wctx = repo[None]
636 wctx = repo[None]
636 wlock = repo.wlock()
637 wlock = repo.wlock()
637 try:
638 try:
638 wctx.forget(deleted)
639 wctx.forget(deleted)
639 wctx.add(unknown)
640 wctx.add(unknown)
640 for new, old in copies.iteritems():
641 for new, old in copies.iteritems():
641 wctx.copy(old, new)
642 wctx.copy(old, new)
642 finally:
643 finally:
643 wlock.release()
644 wlock.release()
644
645
645 def updatedir(ui, repo, patches, similarity=0):
646 def updatedir(ui, repo, patches, similarity=0):
646 '''Update dirstate after patch application according to metadata'''
647 '''Update dirstate after patch application according to metadata'''
647 if not patches:
648 if not patches:
648 return []
649 return []
649 copies = []
650 copies = []
650 removes = set()
651 removes = set()
651 cfiles = patches.keys()
652 cfiles = patches.keys()
652 cwd = repo.getcwd()
653 cwd = repo.getcwd()
653 if cwd:
654 if cwd:
654 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
655 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
655 for f in patches:
656 for f in patches:
656 gp = patches[f]
657 gp = patches[f]
657 if not gp:
658 if not gp:
658 continue
659 continue
659 if gp.op == 'RENAME':
660 if gp.op == 'RENAME':
660 copies.append((gp.oldpath, gp.path))
661 copies.append((gp.oldpath, gp.path))
661 removes.add(gp.oldpath)
662 removes.add(gp.oldpath)
662 elif gp.op == 'COPY':
663 elif gp.op == 'COPY':
663 copies.append((gp.oldpath, gp.path))
664 copies.append((gp.oldpath, gp.path))
664 elif gp.op == 'DELETE':
665 elif gp.op == 'DELETE':
665 removes.add(gp.path)
666 removes.add(gp.path)
666
667
667 wctx = repo[None]
668 wctx = repo[None]
668 for src, dst in copies:
669 for src, dst in copies:
669 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
670 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
670 if (not similarity) and removes:
671 if (not similarity) and removes:
671 wctx.remove(sorted(removes), True)
672 wctx.remove(sorted(removes), True)
672
673
673 for f in patches:
674 for f in patches:
674 gp = patches[f]
675 gp = patches[f]
675 if gp and gp.mode:
676 if gp and gp.mode:
676 islink, isexec = gp.mode
677 islink, isexec = gp.mode
677 dst = repo.wjoin(gp.path)
678 dst = repo.wjoin(gp.path)
678 # patch won't create empty files
679 # patch won't create empty files
679 if gp.op == 'ADD' and not os.path.lexists(dst):
680 if gp.op == 'ADD' and not os.path.lexists(dst):
680 flags = (isexec and 'x' or '') + (islink and 'l' or '')
681 flags = (isexec and 'x' or '') + (islink and 'l' or '')
681 repo.wwrite(gp.path, '', flags)
682 repo.wwrite(gp.path, '', flags)
682 util.setflags(dst, islink, isexec)
683 util.setflags(dst, islink, isexec)
683 addremove(repo, cfiles, similarity=similarity)
684 addremove(repo, cfiles, similarity=similarity)
684 files = patches.keys()
685 files = patches.keys()
685 files.extend([r for r in removes if r not in files])
686 files.extend([r for r in removes if r not in files])
686 return sorted(files)
687 return sorted(files)
687
688
688 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
689 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
689 """Update the dirstate to reflect the intent of copying src to dst. For
690 """Update the dirstate to reflect the intent of copying src to dst. For
690 different reasons it might not end with dst being marked as copied from src.
691 different reasons it might not end with dst being marked as copied from src.
691 """
692 """
692 origsrc = repo.dirstate.copied(src) or src
693 origsrc = repo.dirstate.copied(src) or src
693 if dst == origsrc: # copying back a copy?
694 if dst == origsrc: # copying back a copy?
694 if repo.dirstate[dst] not in 'mn' and not dryrun:
695 if repo.dirstate[dst] not in 'mn' and not dryrun:
695 repo.dirstate.normallookup(dst)
696 repo.dirstate.normallookup(dst)
696 else:
697 else:
697 if repo.dirstate[origsrc] == 'a' and origsrc == src:
698 if repo.dirstate[origsrc] == 'a' and origsrc == src:
698 if not ui.quiet:
699 if not ui.quiet:
699 ui.warn(_("%s has not been committed yet, so no copy "
700 ui.warn(_("%s has not been committed yet, so no copy "
700 "data will be stored for %s.\n")
701 "data will be stored for %s.\n")
701 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
702 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
702 if repo.dirstate[dst] in '?r' and not dryrun:
703 if repo.dirstate[dst] in '?r' and not dryrun:
703 wctx.add([dst])
704 wctx.add([dst])
704 elif not dryrun:
705 elif not dryrun:
705 wctx.copy(origsrc, dst)
706 wctx.copy(origsrc, dst)
706
707
707 def readrequires(opener, supported):
708 def readrequires(opener, supported):
708 '''Reads and parses .hg/requires and checks if all entries found
709 '''Reads and parses .hg/requires and checks if all entries found
709 are in the list of supported features.'''
710 are in the list of supported features.'''
710 requirements = set(opener.read("requires").splitlines())
711 requirements = set(opener.read("requires").splitlines())
711 missings = []
712 missings = []
712 for r in requirements:
713 for r in requirements:
713 if r not in supported:
714 if r not in supported:
714 if not r or not r[0].isalnum():
715 if not r or not r[0].isalnum():
715 raise error.RequirementError(_(".hg/requires file is corrupt"))
716 raise error.RequirementError(_(".hg/requires file is corrupt"))
716 missings.append(r)
717 missings.append(r)
717 missings.sort()
718 missings.sort()
718 if missings:
719 if missings:
719 raise error.RequirementError(_("unknown repository format: "
720 raise error.RequirementError(_("unknown repository format: "
720 "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
721 "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
721 return requirements
722 return requirements
722
723
723 class filecacheentry(object):
724 class filecacheentry(object):
724 def __init__(self, path):
725 def __init__(self, path):
725 self.path = path
726 self.path = path
726 self.cachestat = filecacheentry.stat(self.path)
727 self.cachestat = filecacheentry.stat(self.path)
727
728
728 if self.cachestat:
729 if self.cachestat:
729 self._cacheable = self.cachestat.cacheable()
730 self._cacheable = self.cachestat.cacheable()
730 else:
731 else:
731 # None means we don't know yet
732 # None means we don't know yet
732 self._cacheable = None
733 self._cacheable = None
733
734
734 def refresh(self):
735 def refresh(self):
735 if self.cacheable():
736 if self.cacheable():
736 self.cachestat = filecacheentry.stat(self.path)
737 self.cachestat = filecacheentry.stat(self.path)
737
738
738 def cacheable(self):
739 def cacheable(self):
739 if self._cacheable is not None:
740 if self._cacheable is not None:
740 return self._cacheable
741 return self._cacheable
741
742
742 # we don't know yet, assume it is for now
743 # we don't know yet, assume it is for now
743 return True
744 return True
744
745
745 def changed(self):
746 def changed(self):
746 # no point in going further if we can't cache it
747 # no point in going further if we can't cache it
747 if not self.cacheable():
748 if not self.cacheable():
748 return True
749 return True
749
750
750 newstat = filecacheentry.stat(self.path)
751 newstat = filecacheentry.stat(self.path)
751
752
752 # we may not know if it's cacheable yet, check again now
753 # we may not know if it's cacheable yet, check again now
753 if newstat and self._cacheable is None:
754 if newstat and self._cacheable is None:
754 self._cacheable = newstat.cacheable()
755 self._cacheable = newstat.cacheable()
755
756
756 # check again
757 # check again
757 if not self._cacheable:
758 if not self._cacheable:
758 return True
759 return True
759
760
760 if self.cachestat != newstat:
761 if self.cachestat != newstat:
761 self.cachestat = newstat
762 self.cachestat = newstat
762 return True
763 return True
763 else:
764 else:
764 return False
765 return False
765
766
766 @staticmethod
767 @staticmethod
767 def stat(path):
768 def stat(path):
768 try:
769 try:
769 return util.cachestat(path)
770 return util.cachestat(path)
770 except OSError, e:
771 except OSError, e:
771 if e.errno != errno.ENOENT:
772 if e.errno != errno.ENOENT:
772 raise
773 raise
773
774
774 class filecache(object):
775 class filecache(object):
775 '''A property like decorator that tracks a file under .hg/ for updates.
776 '''A property like decorator that tracks a file under .hg/ for updates.
776
777
777 Records stat info when called in _filecache.
778 Records stat info when called in _filecache.
778
779
779 On subsequent calls, compares old stat info with new info, and recreates
780 On subsequent calls, compares old stat info with new info, and recreates
780 the object when needed, updating the new stat info in _filecache.
781 the object when needed, updating the new stat info in _filecache.
781
782
782 Mercurial either atomic renames or appends for files under .hg,
783 Mercurial either atomic renames or appends for files under .hg,
783 so to ensure the cache is reliable we need the filesystem to be able
784 so to ensure the cache is reliable we need the filesystem to be able
784 to tell us if a file has been replaced. If it can't, we fallback to
785 to tell us if a file has been replaced. If it can't, we fallback to
785 recreating the object on every call (essentially the same behaviour as
786 recreating the object on every call (essentially the same behaviour as
786 propertycache).'''
787 propertycache).'''
787 def __init__(self, path, instore=False):
788 def __init__(self, path, instore=False):
788 self.path = path
789 self.path = path
789 self.instore = instore
790 self.instore = instore
790
791
791 def __call__(self, func):
792 def __call__(self, func):
792 self.func = func
793 self.func = func
793 self.name = func.__name__
794 self.name = func.__name__
794 return self
795 return self
795
796
796 def __get__(self, obj, type=None):
797 def __get__(self, obj, type=None):
797 entry = obj._filecache.get(self.name)
798 entry = obj._filecache.get(self.name)
798
799
799 if entry:
800 if entry:
800 if entry.changed():
801 if entry.changed():
801 entry.obj = self.func(obj)
802 entry.obj = self.func(obj)
802 else:
803 else:
803 path = self.instore and obj.sjoin(self.path) or obj.join(self.path)
804 path = self.instore and obj.sjoin(self.path) or obj.join(self.path)
804
805
805 # We stat -before- creating the object so our cache doesn't lie if
806 # We stat -before- creating the object so our cache doesn't lie if
806 # a writer modified between the time we read and stat
807 # a writer modified between the time we read and stat
807 entry = filecacheentry(path)
808 entry = filecacheentry(path)
808 entry.obj = self.func(obj)
809 entry.obj = self.func(obj)
809
810
810 obj._filecache[self.name] = entry
811 obj._filecache[self.name] = entry
811
812
812 setattr(obj, self.name, entry.obj)
813 setattr(obj, self.name, entry.obj)
813 return entry.obj
814 return entry.obj
@@ -1,1149 +1,1149
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil, posixpath
8 import errno, os, re, xml.dom.minidom, shutil, posixpath
9 import stat, subprocess, tarfile
9 import stat, subprocess, tarfile
10 from i18n import _
10 from i18n import _
11 import config, scmutil, util, node, error, cmdutil, bookmarks
11 import config, scmutil, util, node, error, cmdutil, bookmarks
12 hg = None
12 hg = None
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14
14
15 nullstate = ('', '', 'empty')
15 nullstate = ('', '', 'empty')
16
16
17 def state(ctx, ui):
17 def state(ctx, ui):
18 """return a state dict, mapping subrepo paths configured in .hgsub
18 """return a state dict, mapping subrepo paths configured in .hgsub
19 to tuple: (source from .hgsub, revision from .hgsubstate, kind
19 to tuple: (source from .hgsub, revision from .hgsubstate, kind
20 (key in types dict))
20 (key in types dict))
21 """
21 """
22 p = config.config()
22 p = config.config()
23 def read(f, sections=None, remap=None):
23 def read(f, sections=None, remap=None):
24 if f in ctx:
24 if f in ctx:
25 try:
25 try:
26 data = ctx[f].data()
26 data = ctx[f].data()
27 except IOError, err:
27 except IOError, err:
28 if err.errno != errno.ENOENT:
28 if err.errno != errno.ENOENT:
29 raise
29 raise
30 # handle missing subrepo spec files as removed
30 # handle missing subrepo spec files as removed
31 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
31 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
32 return
32 return
33 p.parse(f, data, sections, remap, read)
33 p.parse(f, data, sections, remap, read)
34 else:
34 else:
35 raise util.Abort(_("subrepo spec file %s not found") % f)
35 raise util.Abort(_("subrepo spec file %s not found") % f)
36
36
37 if '.hgsub' in ctx:
37 if '.hgsub' in ctx:
38 read('.hgsub')
38 read('.hgsub')
39
39
40 for path, src in ui.configitems('subpaths'):
40 for path, src in ui.configitems('subpaths'):
41 p.set('subpaths', path, src, ui.configsource('subpaths', path))
41 p.set('subpaths', path, src, ui.configsource('subpaths', path))
42
42
43 rev = {}
43 rev = {}
44 if '.hgsubstate' in ctx:
44 if '.hgsubstate' in ctx:
45 try:
45 try:
46 for l in ctx['.hgsubstate'].data().splitlines():
46 for l in ctx['.hgsubstate'].data().splitlines():
47 revision, path = l.split(" ", 1)
47 revision, path = l.split(" ", 1)
48 rev[path] = revision
48 rev[path] = revision
49 except IOError, err:
49 except IOError, err:
50 if err.errno != errno.ENOENT:
50 if err.errno != errno.ENOENT:
51 raise
51 raise
52
52
53 def remap(src):
53 def remap(src):
54 for pattern, repl in p.items('subpaths'):
54 for pattern, repl in p.items('subpaths'):
55 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
55 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
56 # does a string decode.
56 # does a string decode.
57 repl = repl.encode('string-escape')
57 repl = repl.encode('string-escape')
58 # However, we still want to allow back references to go
58 # However, we still want to allow back references to go
59 # through unharmed, so we turn r'\\1' into r'\1'. Again,
59 # through unharmed, so we turn r'\\1' into r'\1'. Again,
60 # extra escapes are needed because re.sub string decodes.
60 # extra escapes are needed because re.sub string decodes.
61 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
61 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
62 try:
62 try:
63 src = re.sub(pattern, repl, src, 1)
63 src = re.sub(pattern, repl, src, 1)
64 except re.error, e:
64 except re.error, e:
65 raise util.Abort(_("bad subrepository pattern in %s: %s")
65 raise util.Abort(_("bad subrepository pattern in %s: %s")
66 % (p.source('subpaths', pattern), e))
66 % (p.source('subpaths', pattern), e))
67 return src
67 return src
68
68
69 state = {}
69 state = {}
70 for path, src in p[''].items():
70 for path, src in p[''].items():
71 kind = 'hg'
71 kind = 'hg'
72 if src.startswith('['):
72 if src.startswith('['):
73 if ']' not in src:
73 if ']' not in src:
74 raise util.Abort(_('missing ] in subrepo source'))
74 raise util.Abort(_('missing ] in subrepo source'))
75 kind, src = src.split(']', 1)
75 kind, src = src.split(']', 1)
76 kind = kind[1:]
76 kind = kind[1:]
77 src = src.lstrip() # strip any extra whitespace after ']'
77 src = src.lstrip() # strip any extra whitespace after ']'
78
78
79 if not util.url(src).isabs():
79 if not util.url(src).isabs():
80 parent = _abssource(ctx._repo, abort=False)
80 parent = _abssource(ctx._repo, abort=False)
81 if parent:
81 if parent:
82 parent = util.url(parent)
82 parent = util.url(parent)
83 parent.path = posixpath.join(parent.path or '', src)
83 parent.path = posixpath.join(parent.path or '', src)
84 parent.path = posixpath.normpath(parent.path)
84 parent.path = posixpath.normpath(parent.path)
85 joined = str(parent)
85 joined = str(parent)
86 # Remap the full joined path and use it if it changes,
86 # Remap the full joined path and use it if it changes,
87 # else remap the original source.
87 # else remap the original source.
88 remapped = remap(joined)
88 remapped = remap(joined)
89 if remapped == joined:
89 if remapped == joined:
90 src = remap(src)
90 src = remap(src)
91 else:
91 else:
92 src = remapped
92 src = remapped
93
93
94 src = remap(src)
94 src = remap(src)
95 state[path] = (src.strip(), rev.get(path, ''), kind)
95 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
96
96
97 return state
97 return state
98
98
99 def writestate(repo, state):
99 def writestate(repo, state):
100 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
100 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
101 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
101 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
102 repo.wwrite('.hgsubstate', ''.join(lines), '')
102 repo.wwrite('.hgsubstate', ''.join(lines), '')
103
103
104 def submerge(repo, wctx, mctx, actx, overwrite):
104 def submerge(repo, wctx, mctx, actx, overwrite):
105 """delegated from merge.applyupdates: merging of .hgsubstate file
105 """delegated from merge.applyupdates: merging of .hgsubstate file
106 in working context, merging context and ancestor context"""
106 in working context, merging context and ancestor context"""
107 if mctx == actx: # backwards?
107 if mctx == actx: # backwards?
108 actx = wctx.p1()
108 actx = wctx.p1()
109 s1 = wctx.substate
109 s1 = wctx.substate
110 s2 = mctx.substate
110 s2 = mctx.substate
111 sa = actx.substate
111 sa = actx.substate
112 sm = {}
112 sm = {}
113
113
114 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
114 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
115
115
116 def debug(s, msg, r=""):
116 def debug(s, msg, r=""):
117 if r:
117 if r:
118 r = "%s:%s:%s" % r
118 r = "%s:%s:%s" % r
119 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
119 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
120
120
121 for s, l in s1.items():
121 for s, l in s1.items():
122 a = sa.get(s, nullstate)
122 a = sa.get(s, nullstate)
123 ld = l # local state with possible dirty flag for compares
123 ld = l # local state with possible dirty flag for compares
124 if wctx.sub(s).dirty():
124 if wctx.sub(s).dirty():
125 ld = (l[0], l[1] + "+")
125 ld = (l[0], l[1] + "+")
126 if wctx == actx: # overwrite
126 if wctx == actx: # overwrite
127 a = ld
127 a = ld
128
128
129 if s in s2:
129 if s in s2:
130 r = s2[s]
130 r = s2[s]
131 if ld == r or r == a: # no change or local is newer
131 if ld == r or r == a: # no change or local is newer
132 sm[s] = l
132 sm[s] = l
133 continue
133 continue
134 elif ld == a: # other side changed
134 elif ld == a: # other side changed
135 debug(s, "other changed, get", r)
135 debug(s, "other changed, get", r)
136 wctx.sub(s).get(r, overwrite)
136 wctx.sub(s).get(r, overwrite)
137 sm[s] = r
137 sm[s] = r
138 elif ld[0] != r[0]: # sources differ
138 elif ld[0] != r[0]: # sources differ
139 if repo.ui.promptchoice(
139 if repo.ui.promptchoice(
140 _(' subrepository sources for %s differ\n'
140 _(' subrepository sources for %s differ\n'
141 'use (l)ocal source (%s) or (r)emote source (%s)?')
141 'use (l)ocal source (%s) or (r)emote source (%s)?')
142 % (s, l[0], r[0]),
142 % (s, l[0], r[0]),
143 (_('&Local'), _('&Remote')), 0):
143 (_('&Local'), _('&Remote')), 0):
144 debug(s, "prompt changed, get", r)
144 debug(s, "prompt changed, get", r)
145 wctx.sub(s).get(r, overwrite)
145 wctx.sub(s).get(r, overwrite)
146 sm[s] = r
146 sm[s] = r
147 elif ld[1] == a[1]: # local side is unchanged
147 elif ld[1] == a[1]: # local side is unchanged
148 debug(s, "other side changed, get", r)
148 debug(s, "other side changed, get", r)
149 wctx.sub(s).get(r, overwrite)
149 wctx.sub(s).get(r, overwrite)
150 sm[s] = r
150 sm[s] = r
151 else:
151 else:
152 debug(s, "both sides changed, merge with", r)
152 debug(s, "both sides changed, merge with", r)
153 wctx.sub(s).merge(r)
153 wctx.sub(s).merge(r)
154 sm[s] = l
154 sm[s] = l
155 elif ld == a: # remote removed, local unchanged
155 elif ld == a: # remote removed, local unchanged
156 debug(s, "remote removed, remove")
156 debug(s, "remote removed, remove")
157 wctx.sub(s).remove()
157 wctx.sub(s).remove()
158 elif a == nullstate: # not present in remote or ancestor
158 elif a == nullstate: # not present in remote or ancestor
159 debug(s, "local added, keep")
159 debug(s, "local added, keep")
160 sm[s] = l
160 sm[s] = l
161 continue
161 continue
162 else:
162 else:
163 if repo.ui.promptchoice(
163 if repo.ui.promptchoice(
164 _(' local changed subrepository %s which remote removed\n'
164 _(' local changed subrepository %s which remote removed\n'
165 'use (c)hanged version or (d)elete?') % s,
165 'use (c)hanged version or (d)elete?') % s,
166 (_('&Changed'), _('&Delete')), 0):
166 (_('&Changed'), _('&Delete')), 0):
167 debug(s, "prompt remove")
167 debug(s, "prompt remove")
168 wctx.sub(s).remove()
168 wctx.sub(s).remove()
169
169
170 for s, r in sorted(s2.items()):
170 for s, r in sorted(s2.items()):
171 if s in s1:
171 if s in s1:
172 continue
172 continue
173 elif s not in sa:
173 elif s not in sa:
174 debug(s, "remote added, get", r)
174 debug(s, "remote added, get", r)
175 mctx.sub(s).get(r)
175 mctx.sub(s).get(r)
176 sm[s] = r
176 sm[s] = r
177 elif r != sa[s]:
177 elif r != sa[s]:
178 if repo.ui.promptchoice(
178 if repo.ui.promptchoice(
179 _(' remote changed subrepository %s which local removed\n'
179 _(' remote changed subrepository %s which local removed\n'
180 'use (c)hanged version or (d)elete?') % s,
180 'use (c)hanged version or (d)elete?') % s,
181 (_('&Changed'), _('&Delete')), 0) == 0:
181 (_('&Changed'), _('&Delete')), 0) == 0:
182 debug(s, "prompt recreate", r)
182 debug(s, "prompt recreate", r)
183 wctx.sub(s).get(r)
183 wctx.sub(s).get(r)
184 sm[s] = r
184 sm[s] = r
185
185
186 # record merged .hgsubstate
186 # record merged .hgsubstate
187 writestate(repo, sm)
187 writestate(repo, sm)
188
188
189 def _updateprompt(ui, sub, dirty, local, remote):
189 def _updateprompt(ui, sub, dirty, local, remote):
190 if dirty:
190 if dirty:
191 msg = (_(' subrepository sources for %s differ\n'
191 msg = (_(' subrepository sources for %s differ\n'
192 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
192 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
193 % (subrelpath(sub), local, remote))
193 % (subrelpath(sub), local, remote))
194 else:
194 else:
195 msg = (_(' subrepository sources for %s differ (in checked out version)\n'
195 msg = (_(' subrepository sources for %s differ (in checked out version)\n'
196 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
196 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
197 % (subrelpath(sub), local, remote))
197 % (subrelpath(sub), local, remote))
198 return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
198 return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
199
199
200 def reporelpath(repo):
200 def reporelpath(repo):
201 """return path to this (sub)repo as seen from outermost repo"""
201 """return path to this (sub)repo as seen from outermost repo"""
202 parent = repo
202 parent = repo
203 while util.safehasattr(parent, '_subparent'):
203 while util.safehasattr(parent, '_subparent'):
204 parent = parent._subparent
204 parent = parent._subparent
205 p = parent.root.rstrip(os.sep)
205 p = parent.root.rstrip(os.sep)
206 return repo.root[len(p) + 1:]
206 return repo.root[len(p) + 1:]
207
207
208 def subrelpath(sub):
208 def subrelpath(sub):
209 """return path to this subrepo as seen from outermost repo"""
209 """return path to this subrepo as seen from outermost repo"""
210 if util.safehasattr(sub, '_relpath'):
210 if util.safehasattr(sub, '_relpath'):
211 return sub._relpath
211 return sub._relpath
212 if not util.safehasattr(sub, '_repo'):
212 if not util.safehasattr(sub, '_repo'):
213 return sub._path
213 return sub._path
214 return reporelpath(sub._repo)
214 return reporelpath(sub._repo)
215
215
216 def _abssource(repo, push=False, abort=True):
216 def _abssource(repo, push=False, abort=True):
217 """return pull/push path of repo - either based on parent repo .hgsub info
217 """return pull/push path of repo - either based on parent repo .hgsub info
218 or on the top repo config. Abort or return None if no source found."""
218 or on the top repo config. Abort or return None if no source found."""
219 if util.safehasattr(repo, '_subparent'):
219 if util.safehasattr(repo, '_subparent'):
220 source = util.url(repo._subsource)
220 source = util.url(repo._subsource)
221 if source.isabs():
221 if source.isabs():
222 return str(source)
222 return str(source)
223 source.path = posixpath.normpath(source.path)
223 source.path = posixpath.normpath(source.path)
224 parent = _abssource(repo._subparent, push, abort=False)
224 parent = _abssource(repo._subparent, push, abort=False)
225 if parent:
225 if parent:
226 parent = util.url(util.pconvert(parent))
226 parent = util.url(util.pconvert(parent))
227 parent.path = posixpath.join(parent.path or '', source.path)
227 parent.path = posixpath.join(parent.path or '', source.path)
228 parent.path = posixpath.normpath(parent.path)
228 parent.path = posixpath.normpath(parent.path)
229 return str(parent)
229 return str(parent)
230 else: # recursion reached top repo
230 else: # recursion reached top repo
231 if util.safehasattr(repo, '_subtoppath'):
231 if util.safehasattr(repo, '_subtoppath'):
232 return repo._subtoppath
232 return repo._subtoppath
233 if push and repo.ui.config('paths', 'default-push'):
233 if push and repo.ui.config('paths', 'default-push'):
234 return repo.ui.config('paths', 'default-push')
234 return repo.ui.config('paths', 'default-push')
235 if repo.ui.config('paths', 'default'):
235 if repo.ui.config('paths', 'default'):
236 return repo.ui.config('paths', 'default')
236 return repo.ui.config('paths', 'default')
237 if abort:
237 if abort:
238 raise util.Abort(_("default path for subrepository %s not found") %
238 raise util.Abort(_("default path for subrepository %s not found") %
239 reporelpath(repo))
239 reporelpath(repo))
240
240
241 def itersubrepos(ctx1, ctx2):
241 def itersubrepos(ctx1, ctx2):
242 """find subrepos in ctx1 or ctx2"""
242 """find subrepos in ctx1 or ctx2"""
243 # Create a (subpath, ctx) mapping where we prefer subpaths from
243 # Create a (subpath, ctx) mapping where we prefer subpaths from
244 # ctx1. The subpaths from ctx2 are important when the .hgsub file
244 # ctx1. The subpaths from ctx2 are important when the .hgsub file
245 # has been modified (in ctx2) but not yet committed (in ctx1).
245 # has been modified (in ctx2) but not yet committed (in ctx1).
246 subpaths = dict.fromkeys(ctx2.substate, ctx2)
246 subpaths = dict.fromkeys(ctx2.substate, ctx2)
247 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
247 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
248 for subpath, ctx in sorted(subpaths.iteritems()):
248 for subpath, ctx in sorted(subpaths.iteritems()):
249 yield subpath, ctx.sub(subpath)
249 yield subpath, ctx.sub(subpath)
250
250
251 def subrepo(ctx, path):
251 def subrepo(ctx, path):
252 """return instance of the right subrepo class for subrepo in path"""
252 """return instance of the right subrepo class for subrepo in path"""
253 # subrepo inherently violates our import layering rules
253 # subrepo inherently violates our import layering rules
254 # because it wants to make repo objects from deep inside the stack
254 # because it wants to make repo objects from deep inside the stack
255 # so we manually delay the circular imports to not break
255 # so we manually delay the circular imports to not break
256 # scripts that don't use our demand-loading
256 # scripts that don't use our demand-loading
257 global hg
257 global hg
258 import hg as h
258 import hg as h
259 hg = h
259 hg = h
260
260
261 scmutil.pathauditor(ctx._repo.root)(path)
261 scmutil.pathauditor(ctx._repo.root)(path)
262 state = ctx.substate.get(path, nullstate)
262 state = ctx.substate.get(path, nullstate)
263 if state[2] not in types:
263 if state[2] not in types:
264 raise util.Abort(_('unknown subrepo type %s') % state[2])
264 raise util.Abort(_('unknown subrepo type %s') % state[2])
265 return types[state[2]](ctx, path, state[:2])
265 return types[state[2]](ctx, path, state[:2])
266
266
267 # subrepo classes need to implement the following abstract class:
267 # subrepo classes need to implement the following abstract class:
268
268
269 class abstractsubrepo(object):
269 class abstractsubrepo(object):
270
270
271 def dirty(self, ignoreupdate=False):
271 def dirty(self, ignoreupdate=False):
272 """returns true if the dirstate of the subrepo is dirty or does not
272 """returns true if the dirstate of the subrepo is dirty or does not
273 match current stored state. If ignoreupdate is true, only check
273 match current stored state. If ignoreupdate is true, only check
274 whether the subrepo has uncommitted changes in its dirstate.
274 whether the subrepo has uncommitted changes in its dirstate.
275 """
275 """
276 raise NotImplementedError
276 raise NotImplementedError
277
277
278 def checknested(self, path):
278 def checknested(self, path):
279 """check if path is a subrepository within this repository"""
279 """check if path is a subrepository within this repository"""
280 return False
280 return False
281
281
282 def commit(self, text, user, date):
282 def commit(self, text, user, date):
283 """commit the current changes to the subrepo with the given
283 """commit the current changes to the subrepo with the given
284 log message. Use given user and date if possible. Return the
284 log message. Use given user and date if possible. Return the
285 new state of the subrepo.
285 new state of the subrepo.
286 """
286 """
287 raise NotImplementedError
287 raise NotImplementedError
288
288
289 def remove(self):
289 def remove(self):
290 """remove the subrepo
290 """remove the subrepo
291
291
292 (should verify the dirstate is not dirty first)
292 (should verify the dirstate is not dirty first)
293 """
293 """
294 raise NotImplementedError
294 raise NotImplementedError
295
295
296 def get(self, state, overwrite=False):
296 def get(self, state, overwrite=False):
297 """run whatever commands are needed to put the subrepo into
297 """run whatever commands are needed to put the subrepo into
298 this state
298 this state
299 """
299 """
300 raise NotImplementedError
300 raise NotImplementedError
301
301
302 def merge(self, state):
302 def merge(self, state):
303 """merge currently-saved state with the new state."""
303 """merge currently-saved state with the new state."""
304 raise NotImplementedError
304 raise NotImplementedError
305
305
306 def push(self, opts):
306 def push(self, opts):
307 """perform whatever action is analogous to 'hg push'
307 """perform whatever action is analogous to 'hg push'
308
308
309 This may be a no-op on some systems.
309 This may be a no-op on some systems.
310 """
310 """
311 raise NotImplementedError
311 raise NotImplementedError
312
312
313 def add(self, ui, match, dryrun, prefix):
313 def add(self, ui, match, dryrun, prefix):
314 return []
314 return []
315
315
316 def status(self, rev2, **opts):
316 def status(self, rev2, **opts):
317 return [], [], [], [], [], [], []
317 return [], [], [], [], [], [], []
318
318
319 def diff(self, diffopts, node2, match, prefix, **opts):
319 def diff(self, diffopts, node2, match, prefix, **opts):
320 pass
320 pass
321
321
322 def outgoing(self, ui, dest, opts):
322 def outgoing(self, ui, dest, opts):
323 return 1
323 return 1
324
324
325 def incoming(self, ui, source, opts):
325 def incoming(self, ui, source, opts):
326 return 1
326 return 1
327
327
328 def files(self):
328 def files(self):
329 """return filename iterator"""
329 """return filename iterator"""
330 raise NotImplementedError
330 raise NotImplementedError
331
331
332 def filedata(self, name):
332 def filedata(self, name):
333 """return file data"""
333 """return file data"""
334 raise NotImplementedError
334 raise NotImplementedError
335
335
336 def fileflags(self, name):
336 def fileflags(self, name):
337 """return file flags"""
337 """return file flags"""
338 return ''
338 return ''
339
339
340 def archive(self, ui, archiver, prefix):
340 def archive(self, ui, archiver, prefix):
341 files = self.files()
341 files = self.files()
342 total = len(files)
342 total = len(files)
343 relpath = subrelpath(self)
343 relpath = subrelpath(self)
344 ui.progress(_('archiving (%s)') % relpath, 0,
344 ui.progress(_('archiving (%s)') % relpath, 0,
345 unit=_('files'), total=total)
345 unit=_('files'), total=total)
346 for i, name in enumerate(files):
346 for i, name in enumerate(files):
347 flags = self.fileflags(name)
347 flags = self.fileflags(name)
348 mode = 'x' in flags and 0755 or 0644
348 mode = 'x' in flags and 0755 or 0644
349 symlink = 'l' in flags
349 symlink = 'l' in flags
350 archiver.addfile(os.path.join(prefix, self._path, name),
350 archiver.addfile(os.path.join(prefix, self._path, name),
351 mode, symlink, self.filedata(name))
351 mode, symlink, self.filedata(name))
352 ui.progress(_('archiving (%s)') % relpath, i + 1,
352 ui.progress(_('archiving (%s)') % relpath, i + 1,
353 unit=_('files'), total=total)
353 unit=_('files'), total=total)
354 ui.progress(_('archiving (%s)') % relpath, None)
354 ui.progress(_('archiving (%s)') % relpath, None)
355
355
356 def walk(self, match):
356 def walk(self, match):
357 '''
357 '''
358 walk recursively through the directory tree, finding all files
358 walk recursively through the directory tree, finding all files
359 matched by the match function
359 matched by the match function
360 '''
360 '''
361 pass
361 pass
362
362
363 def forget(self, files):
363 def forget(self, files):
364 pass
364 pass
365
365
366 class hgsubrepo(abstractsubrepo):
366 class hgsubrepo(abstractsubrepo):
367 def __init__(self, ctx, path, state):
367 def __init__(self, ctx, path, state):
368 self._path = path
368 self._path = path
369 self._state = state
369 self._state = state
370 r = ctx._repo
370 r = ctx._repo
371 root = r.wjoin(path)
371 root = r.wjoin(path)
372 create = False
372 create = False
373 if not os.path.exists(os.path.join(root, '.hg')):
373 if not os.path.exists(os.path.join(root, '.hg')):
374 create = True
374 create = True
375 util.makedirs(root)
375 util.makedirs(root)
376 self._repo = hg.repository(r.ui, root, create=create)
376 self._repo = hg.repository(r.ui, root, create=create)
377 self._initrepo(r, state[0], create)
377 self._initrepo(r, state[0], create)
378
378
379 def _initrepo(self, parentrepo, source, create):
379 def _initrepo(self, parentrepo, source, create):
380 self._repo._subparent = parentrepo
380 self._repo._subparent = parentrepo
381 self._repo._subsource = source
381 self._repo._subsource = source
382
382
383 if create:
383 if create:
384 fp = self._repo.opener("hgrc", "w", text=True)
384 fp = self._repo.opener("hgrc", "w", text=True)
385 fp.write('[paths]\n')
385 fp.write('[paths]\n')
386
386
387 def addpathconfig(key, value):
387 def addpathconfig(key, value):
388 if value:
388 if value:
389 fp.write('%s = %s\n' % (key, value))
389 fp.write('%s = %s\n' % (key, value))
390 self._repo.ui.setconfig('paths', key, value)
390 self._repo.ui.setconfig('paths', key, value)
391
391
392 defpath = _abssource(self._repo, abort=False)
392 defpath = _abssource(self._repo, abort=False)
393 defpushpath = _abssource(self._repo, True, abort=False)
393 defpushpath = _abssource(self._repo, True, abort=False)
394 addpathconfig('default', defpath)
394 addpathconfig('default', defpath)
395 if defpath != defpushpath:
395 if defpath != defpushpath:
396 addpathconfig('default-push', defpushpath)
396 addpathconfig('default-push', defpushpath)
397 fp.close()
397 fp.close()
398
398
399 def add(self, ui, match, dryrun, prefix):
399 def add(self, ui, match, dryrun, prefix):
400 return cmdutil.add(ui, self._repo, match, dryrun, True,
400 return cmdutil.add(ui, self._repo, match, dryrun, True,
401 os.path.join(prefix, self._path))
401 os.path.join(prefix, self._path))
402
402
403 def status(self, rev2, **opts):
403 def status(self, rev2, **opts):
404 try:
404 try:
405 rev1 = self._state[1]
405 rev1 = self._state[1]
406 ctx1 = self._repo[rev1]
406 ctx1 = self._repo[rev1]
407 ctx2 = self._repo[rev2]
407 ctx2 = self._repo[rev2]
408 return self._repo.status(ctx1, ctx2, **opts)
408 return self._repo.status(ctx1, ctx2, **opts)
409 except error.RepoLookupError, inst:
409 except error.RepoLookupError, inst:
410 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
410 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
411 % (inst, subrelpath(self)))
411 % (inst, subrelpath(self)))
412 return [], [], [], [], [], [], []
412 return [], [], [], [], [], [], []
413
413
414 def diff(self, diffopts, node2, match, prefix, **opts):
414 def diff(self, diffopts, node2, match, prefix, **opts):
415 try:
415 try:
416 node1 = node.bin(self._state[1])
416 node1 = node.bin(self._state[1])
417 # We currently expect node2 to come from substate and be
417 # We currently expect node2 to come from substate and be
418 # in hex format
418 # in hex format
419 if node2 is not None:
419 if node2 is not None:
420 node2 = node.bin(node2)
420 node2 = node.bin(node2)
421 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
421 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
422 node1, node2, match,
422 node1, node2, match,
423 prefix=os.path.join(prefix, self._path),
423 prefix=os.path.join(prefix, self._path),
424 listsubrepos=True, **opts)
424 listsubrepos=True, **opts)
425 except error.RepoLookupError, inst:
425 except error.RepoLookupError, inst:
426 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
426 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
427 % (inst, subrelpath(self)))
427 % (inst, subrelpath(self)))
428
428
429 def archive(self, ui, archiver, prefix):
429 def archive(self, ui, archiver, prefix):
430 self._get(self._state + ('hg',))
430 self._get(self._state + ('hg',))
431 abstractsubrepo.archive(self, ui, archiver, prefix)
431 abstractsubrepo.archive(self, ui, archiver, prefix)
432
432
433 rev = self._state[1]
433 rev = self._state[1]
434 ctx = self._repo[rev]
434 ctx = self._repo[rev]
435 for subpath in ctx.substate:
435 for subpath in ctx.substate:
436 s = subrepo(ctx, subpath)
436 s = subrepo(ctx, subpath)
437 s.archive(ui, archiver, os.path.join(prefix, self._path))
437 s.archive(ui, archiver, os.path.join(prefix, self._path))
438
438
439 def dirty(self, ignoreupdate=False):
439 def dirty(self, ignoreupdate=False):
440 r = self._state[1]
440 r = self._state[1]
441 if r == '' and not ignoreupdate: # no state recorded
441 if r == '' and not ignoreupdate: # no state recorded
442 return True
442 return True
443 w = self._repo[None]
443 w = self._repo[None]
444 if r != w.p1().hex() and not ignoreupdate:
444 if r != w.p1().hex() and not ignoreupdate:
445 # different version checked out
445 # different version checked out
446 return True
446 return True
447 return w.dirty() # working directory changed
447 return w.dirty() # working directory changed
448
448
449 def checknested(self, path):
449 def checknested(self, path):
450 return self._repo._checknested(self._repo.wjoin(path))
450 return self._repo._checknested(self._repo.wjoin(path))
451
451
452 def commit(self, text, user, date):
452 def commit(self, text, user, date):
453 # don't bother committing in the subrepo if it's only been
453 # don't bother committing in the subrepo if it's only been
454 # updated
454 # updated
455 if not self.dirty(True):
455 if not self.dirty(True):
456 return self._repo['.'].hex()
456 return self._repo['.'].hex()
457 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
457 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
458 n = self._repo.commit(text, user, date)
458 n = self._repo.commit(text, user, date)
459 if not n:
459 if not n:
460 return self._repo['.'].hex() # different version checked out
460 return self._repo['.'].hex() # different version checked out
461 return node.hex(n)
461 return node.hex(n)
462
462
463 def remove(self):
463 def remove(self):
464 # we can't fully delete the repository as it may contain
464 # we can't fully delete the repository as it may contain
465 # local-only history
465 # local-only history
466 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
466 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
467 hg.clean(self._repo, node.nullid, False)
467 hg.clean(self._repo, node.nullid, False)
468
468
469 def _get(self, state):
469 def _get(self, state):
470 source, revision, kind = state
470 source, revision, kind = state
471 if revision not in self._repo:
471 if revision not in self._repo:
472 self._repo._subsource = source
472 self._repo._subsource = source
473 srcurl = _abssource(self._repo)
473 srcurl = _abssource(self._repo)
474 other = hg.peer(self._repo.ui, {}, srcurl)
474 other = hg.peer(self._repo.ui, {}, srcurl)
475 if len(self._repo) == 0:
475 if len(self._repo) == 0:
476 self._repo.ui.status(_('cloning subrepo %s from %s\n')
476 self._repo.ui.status(_('cloning subrepo %s from %s\n')
477 % (subrelpath(self), srcurl))
477 % (subrelpath(self), srcurl))
478 parentrepo = self._repo._subparent
478 parentrepo = self._repo._subparent
479 shutil.rmtree(self._repo.path)
479 shutil.rmtree(self._repo.path)
480 other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
480 other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
481 self._repo.root, update=False)
481 self._repo.root, update=False)
482 self._initrepo(parentrepo, source, create=True)
482 self._initrepo(parentrepo, source, create=True)
483 else:
483 else:
484 self._repo.ui.status(_('pulling subrepo %s from %s\n')
484 self._repo.ui.status(_('pulling subrepo %s from %s\n')
485 % (subrelpath(self), srcurl))
485 % (subrelpath(self), srcurl))
486 self._repo.pull(other)
486 self._repo.pull(other)
487 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
487 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
488 srcurl)
488 srcurl)
489
489
490 def get(self, state, overwrite=False):
490 def get(self, state, overwrite=False):
491 self._get(state)
491 self._get(state)
492 source, revision, kind = state
492 source, revision, kind = state
493 self._repo.ui.debug("getting subrepo %s\n" % self._path)
493 self._repo.ui.debug("getting subrepo %s\n" % self._path)
494 hg.clean(self._repo, revision, False)
494 hg.clean(self._repo, revision, False)
495
495
    def merge(self, state):
        """Merge the subrepo working copy with the revision in *state*.

        Compares the current checkout (cur), the target (dst) and their
        ancestor to decide between a plain update, a no-op, or a real
        merge; prompts before touching a dirty working copy.
        """
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur:
                # cur is an ancestor of dst: fast-forward with an update
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                # dst already merged into cur: nothing to do
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                # diverged: a genuine merge is required
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # dirty working copy and real work to do: ask the user first
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
521
521
    def push(self, opts):
        """Push this subrepo (and, depth-first, its own committed subrepos)
        to its default push target.

        Returns False as soon as any nested push fails, otherwise the
        result of pushing this repo.
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if not c.sub(s).push(opts):
                return False

        dsturl = _abssource(self._repo, True)
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo.ui, {'ssh': ssh}, dsturl)
        return self._repo.push(other, force, newbranch=newbranch)
539
539
540 def outgoing(self, ui, dest, opts):
540 def outgoing(self, ui, dest, opts):
541 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
541 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
542
542
543 def incoming(self, ui, source, opts):
543 def incoming(self, ui, source, opts):
544 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
544 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
545
545
546 def files(self):
546 def files(self):
547 rev = self._state[1]
547 rev = self._state[1]
548 ctx = self._repo[rev]
548 ctx = self._repo[rev]
549 return ctx.manifest()
549 return ctx.manifest()
550
550
551 def filedata(self, name):
551 def filedata(self, name):
552 rev = self._state[1]
552 rev = self._state[1]
553 return self._repo[rev][name].data()
553 return self._repo[rev][name].data()
554
554
555 def fileflags(self, name):
555 def fileflags(self, name):
556 rev = self._state[1]
556 rev = self._state[1]
557 ctx = self._repo[rev]
557 ctx = self._repo[rev]
558 return ctx.flags(name)
558 return ctx.flags(name)
559
559
560 def walk(self, match):
560 def walk(self, match):
561 ctx = self._repo[None]
561 ctx = self._repo[None]
562 return ctx.walk(match)
562 return ctx.walk(match)
563
563
564 def forget(self, files):
564 def forget(self, files):
565 ctx = self._repo[None]
565 ctx = self._repo[None]
566 ctx.forget(files)
566 ctx.forget(files)
567
567
class svnsubrepo(abstractsubrepo):
    """Subrepository backed by a Subversion working copy.

    Talks to an external ``svn`` binary; its XML output modes are used
    so parsing is locale- and version-independent.
    """
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with *commands* and return (stdout, stderr).

        filename is joined onto the subrepo path and appended as the
        final argument unless it is None.  Unless failok is set, a
        non-zero exit aborts and any stderr output is warned about.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        """(major, minor) version of the svn binary, cached."""
        output, err = self._svncommand(['--version'], filename=None)
        m = re.search(r'^svn,\s+version\s+(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        """Return the last-committed revision of the working copy."""
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges) where changes is True
        if the working directory was changed, and extchanges is
        True if any of these changes concern an external entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes = [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change inside an external counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True
        return bool(changes), False

    def dirty(self, ignoreupdate=False):
        """True if the working copy has changes, or (unless ignoreupdate)
        is checked out at a revision other than the recorded one."""
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged = self._wcchanged()
        if not changed:
            return self._wcrev()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        # bring the working copy up to the revision we just created
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    def remove(self):
        """Delete the working copy, refusing if it has local changes."""
        if self.dirty():
            # BUGFIX: apply % after _() so the untranslated msgid is
            # looked up in the catalog, not the already-formatted string
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            # clean up now-empty parent directories, best effort
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    def get(self, state, overwrite=False):
        """Check out state's URL@revision into the working copy."""
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged() == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    def merge(self, state):
        """svn cannot merge; prompt and update to the target if needed."""
        old = self._state[1]
        new = state[1]
        if new != self._wcrev():
            dirty = old == self._wcrev() or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    def files(self):
        # BUGFIX: _svncommand returns (stdout, stderr); the original
        # called .splitlines() on the tuple, raising AttributeError
        output = self._svncommand(['list'])[0]
        # This works because svn forbids \n in filenames.
        return output.splitlines()

    def filedata(self, name):
        # BUGFIX: return stdout only, not the (stdout, stderr) tuple
        return self._svncommand(['cat'], name)[0]
749
749
750
750
751 class gitsubrepo(abstractsubrepo):
751 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        """Bind a git subrepo at *path* inside the repo of context *ctx*.

        state is the (source, revision, kind) triple from .hgsubstate.
        """
        # TODO add git version check.
        self._state = state
        self._ctx = ctx
        self._path = path
        # path relative to the outermost repo, for user-facing messages
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        # absolute filesystem path of the git checkout
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        self._ui = ctx._repo.ui
761
761
762 def _gitcommand(self, commands, env=None, stream=False):
762 def _gitcommand(self, commands, env=None, stream=False):
763 return self._gitdir(commands, env=env, stream=stream)[0]
763 return self._gitdir(commands, env=env, stream=stream)[0]
764
764
765 def _gitdir(self, commands, env=None, stream=False):
765 def _gitdir(self, commands, env=None, stream=False):
766 return self._gitnodir(commands, env=env, stream=stream,
766 return self._gitnodir(commands, env=env, stream=stream,
767 cwd=self._abspath)
767 cwd=self._abspath)
768
768
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Call the git command with the given subcommand list.

        Returns (stdout, returncode); when *stream* is true, returns
        (stdout-pipe, None) without waiting for the process.  git
        versions prior to 1.6.0 are not supported and will very
        probably fail.  Exit code 1 is tolerated everywhere; higher
        codes abort except for cat-file/symbolic-ref, whose non-zero
        exits are meaningful to callers.
        """
        self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self._ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
                             close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise util.Abort('git %s error %d in %s' %
                             (command, p.returncode, self._relpath))

        return retdata, p.returncode
801
801
802 def _gitmissing(self):
802 def _gitmissing(self):
803 return not os.path.exists(os.path.join(self._abspath, '.git'))
803 return not os.path.exists(os.path.join(self._abspath, '.git'))
804
804
805 def _gitstate(self):
805 def _gitstate(self):
806 return self._gitcommand(['rev-parse', 'HEAD'])
806 return self._gitcommand(['rev-parse', 'HEAD'])
807
807
808 def _gitcurrentbranch(self):
808 def _gitcurrentbranch(self):
809 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
809 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
810 if err:
810 if err:
811 current = None
811 current = None
812 return current
812 return current
813
813
814 def _gitremote(self, remote):
814 def _gitremote(self, remote):
815 out = self._gitcommand(['remote', 'show', '-n', remote])
815 out = self._gitcommand(['remote', 'show', '-n', remote])
816 line = out.split('\n')[1]
816 line = out.split('\n')[1]
817 i = line.index('URL: ') + len('URL: ')
817 i = line.index('URL: ') + len('URL: ')
818 return line[i:]
818 return line[i:]
819
819
820 def _githavelocally(self, revision):
820 def _githavelocally(self, revision):
821 out, code = self._gitdir(['cat-file', '-e', revision])
821 out, code = self._gitdir(['cat-file', '-e', revision])
822 return code == 0
822 return code == 0
823
823
824 def _gitisancestor(self, r1, r2):
824 def _gitisancestor(self, r1, r2):
825 base = self._gitcommand(['merge-base', r1, r2])
825 base = self._gitcommand(['merge-base', r1, r2])
826 return base == r1
826 return base == r1
827
827
828 def _gitisbare(self):
828 def _gitisbare(self):
829 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
829 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
830
830
    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        # -q suppresses per-file output; --refresh rechecks contents
        self._gitcommand(['update-index', '-q', '--refresh'])
836
836
    def _gitbranchmap(self):
        '''returns 2 things:
        a map from git branch to revision
        a map from revision to branches

        Only local heads (refs/heads/) and remote-tracking branches
        (refs/remotes/) are considered; remote HEAD redirects are
        skipped.
        '''
        branch2rev = {}
        rev2branch = {}

        # one '<sha> <refname>' line per ref
        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            # several branches may point at the same revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch
856
856
    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            # only local branches can track a remote one
            if b.startswith('refs/remotes/'):
                continue
            # strip 'refs/heads/' to get the short branch name
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                # branch.<name>.merge holds the remote ref being tracked
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking
871
871
872 def _abssource(self, source):
872 def _abssource(self, source):
873 if '://' not in source:
873 if '://' not in source:
874 # recognize the scp syntax as an absolute source
874 # recognize the scp syntax as an absolute source
875 colon = source.find(':')
875 colon = source.find(':')
876 if colon != -1 and '/' not in source[:colon]:
876 if colon != -1 and '/' not in source[:colon]:
877 return source
877 return source
878 self._subsource = source
878 self._subsource = source
879 return _abssource(self)
879 return _abssource(self)
880
880
    def _fetch(self, source, revision):
        """Ensure *revision* is available locally, cloning the subrepo
        from *source* first if no checkout exists, then fetching from
        origin if the revision is still missing."""
        if self._gitmissing():
            source = self._abssource(source)
            self._ui.status(_('cloning subrepo %s from %s\n') %
                            (self._relpath, source))
            # no checkout yet, so run clone outside any git dir
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self._ui.status(_('pulling subrepo %s from %s\n') %
                        (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                             (revision, self._relpath))
896
896
    def dirty(self, ignoreupdate=False):
        """True if the checkout differs from the recorded state.

        A missing checkout is dirty only if a revision is recorded; a
        bare repo is always dirty.  Unless ignoreupdate is set, being
        checked out at a different revision also counts as dirty.
        """
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        # diff-index --quiet exits 1 when differences exist
        return code == 1
909
909
    def get(self, state, overwrite=False):
        """Update the checkout to the revision in *state*.

        Fetches the revision if needed, un-bares a bare repo, then
        picks a branch to check out: a local branch at the revision
        (master preferred), a tracked remote branch (fast-forwarding
        the local tracking branch when possible), or a detached HEAD
        as a last resort.  With overwrite, local changes are discarded.
        """
        source, revision, kind = state
        if not revision:
            # empty recorded revision means the subrepo was removed
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout', forcing when overwrite was requested
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                          self._relpath)
            self._ui.warn(_('check out a git branch if you intend '
                            'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        # only remote-tracking branches point at the revision from here on
        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 2)[2]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
992
992
993 def commit(self, text, user, date):
993 def commit(self, text, user, date):
994 if self._gitmissing():
994 if self._gitmissing():
995 raise util.Abort(_("subrepo %s is missing") % self._relpath)
995 raise util.Abort(_("subrepo %s is missing") % self._relpath)
996 cmd = ['commit', '-a', '-m', text]
996 cmd = ['commit', '-a', '-m', text]
997 env = os.environ.copy()
997 env = os.environ.copy()
998 if user:
998 if user:
999 cmd += ['--author', user]
999 cmd += ['--author', user]
1000 if date:
1000 if date:
1001 # git's date parser silently ignores when seconds < 1e9
1001 # git's date parser silently ignores when seconds < 1e9
1002 # convert to ISO8601
1002 # convert to ISO8601
1003 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1003 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1004 '%Y-%m-%dT%H:%M:%S %1%2')
1004 '%Y-%m-%dT%H:%M:%S %1%2')
1005 self._gitcommand(cmd, env=env)
1005 self._gitcommand(cmd, env=env)
1006 # make sure commit works otherwise HEAD might not exist under certain
1006 # make sure commit works otherwise HEAD might not exist under certain
1007 # circumstances
1007 # circumstances
1008 return self._gitstate()
1008 return self._gitstate()
1009
1009
1010 def merge(self, state):
1010 def merge(self, state):
1011 source, revision, kind = state
1011 source, revision, kind = state
1012 self._fetch(source, revision)
1012 self._fetch(source, revision)
1013 base = self._gitcommand(['merge-base', revision, self._state[1]])
1013 base = self._gitcommand(['merge-base', revision, self._state[1]])
1014 self._gitupdatestat()
1014 self._gitupdatestat()
1015 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1015 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1016
1016
1017 def mergefunc():
1017 def mergefunc():
1018 if base == revision:
1018 if base == revision:
1019 self.get(state) # fast forward merge
1019 self.get(state) # fast forward merge
1020 elif base != self._state[1]:
1020 elif base != self._state[1]:
1021 self._gitcommand(['merge', '--no-commit', revision])
1021 self._gitcommand(['merge', '--no-commit', revision])
1022
1022
1023 if self.dirty():
1023 if self.dirty():
1024 if self._gitstate() != revision:
1024 if self._gitstate() != revision:
1025 dirty = self._gitstate() == self._state[1] or code != 0
1025 dirty = self._gitstate() == self._state[1] or code != 0
1026 if _updateprompt(self._ui, self, dirty,
1026 if _updateprompt(self._ui, self, dirty,
1027 self._state[1][:7], revision[:7]):
1027 self._state[1][:7], revision[:7]):
1028 mergefunc()
1028 mergefunc()
1029 else:
1029 else:
1030 mergefunc()
1030 mergefunc()
1031
1031
1032 def push(self, opts):
1032 def push(self, opts):
1033 force = opts.get('force')
1033 force = opts.get('force')
1034
1034
1035 if not self._state[1]:
1035 if not self._state[1]:
1036 return True
1036 return True
1037 if self._gitmissing():
1037 if self._gitmissing():
1038 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1038 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1039 # if a branch in origin contains the revision, nothing to do
1039 # if a branch in origin contains the revision, nothing to do
1040 branch2rev, rev2branch = self._gitbranchmap()
1040 branch2rev, rev2branch = self._gitbranchmap()
1041 if self._state[1] in rev2branch:
1041 if self._state[1] in rev2branch:
1042 for b in rev2branch[self._state[1]]:
1042 for b in rev2branch[self._state[1]]:
1043 if b.startswith('refs/remotes/origin/'):
1043 if b.startswith('refs/remotes/origin/'):
1044 return True
1044 return True
1045 for b, revision in branch2rev.iteritems():
1045 for b, revision in branch2rev.iteritems():
1046 if b.startswith('refs/remotes/origin/'):
1046 if b.startswith('refs/remotes/origin/'):
1047 if self._gitisancestor(self._state[1], revision):
1047 if self._gitisancestor(self._state[1], revision):
1048 return True
1048 return True
1049 # otherwise, try to push the currently checked out branch
1049 # otherwise, try to push the currently checked out branch
1050 cmd = ['push']
1050 cmd = ['push']
1051 if force:
1051 if force:
1052 cmd.append('--force')
1052 cmd.append('--force')
1053
1053
1054 current = self._gitcurrentbranch()
1054 current = self._gitcurrentbranch()
1055 if current:
1055 if current:
1056 # determine if the current branch is even useful
1056 # determine if the current branch is even useful
1057 if not self._gitisancestor(self._state[1], current):
1057 if not self._gitisancestor(self._state[1], current):
1058 self._ui.warn(_('unrelated git branch checked out '
1058 self._ui.warn(_('unrelated git branch checked out '
1059 'in subrepo %s\n') % self._relpath)
1059 'in subrepo %s\n') % self._relpath)
1060 return False
1060 return False
1061 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1061 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1062 (current.split('/', 2)[2], self._relpath))
1062 (current.split('/', 2)[2], self._relpath))
1063 self._gitcommand(cmd + ['origin', current])
1063 self._gitcommand(cmd + ['origin', current])
1064 return True
1064 return True
1065 else:
1065 else:
1066 self._ui.warn(_('no branch checked out in subrepo %s\n'
1066 self._ui.warn(_('no branch checked out in subrepo %s\n'
1067 'cannot push revision %s') %
1067 'cannot push revision %s') %
1068 (self._relpath, self._state[1]))
1068 (self._relpath, self._state[1]))
1069 return False
1069 return False
1070
1070
1071 def remove(self):
1071 def remove(self):
1072 if self._gitmissing():
1072 if self._gitmissing():
1073 return
1073 return
1074 if self.dirty():
1074 if self.dirty():
1075 self._ui.warn(_('not removing repo %s because '
1075 self._ui.warn(_('not removing repo %s because '
1076 'it has changes.\n') % self._relpath)
1076 'it has changes.\n') % self._relpath)
1077 return
1077 return
1078 # we can't fully delete the repository as it may contain
1078 # we can't fully delete the repository as it may contain
1079 # local-only history
1079 # local-only history
1080 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1080 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1081 self._gitcommand(['config', 'core.bare', 'true'])
1081 self._gitcommand(['config', 'core.bare', 'true'])
1082 for f in os.listdir(self._abspath):
1082 for f in os.listdir(self._abspath):
1083 if f == '.git':
1083 if f == '.git':
1084 continue
1084 continue
1085 path = os.path.join(self._abspath, f)
1085 path = os.path.join(self._abspath, f)
1086 if os.path.isdir(path) and not os.path.islink(path):
1086 if os.path.isdir(path) and not os.path.islink(path):
1087 shutil.rmtree(path)
1087 shutil.rmtree(path)
1088 else:
1088 else:
1089 os.remove(path)
1089 os.remove(path)
1090
1090
1091 def archive(self, ui, archiver, prefix):
1091 def archive(self, ui, archiver, prefix):
1092 source, revision = self._state
1092 source, revision = self._state
1093 if not revision:
1093 if not revision:
1094 return
1094 return
1095 self._fetch(source, revision)
1095 self._fetch(source, revision)
1096
1096
1097 # Parse git's native archive command.
1097 # Parse git's native archive command.
1098 # This should be much faster than manually traversing the trees
1098 # This should be much faster than manually traversing the trees
1099 # and objects with many subprocess calls.
1099 # and objects with many subprocess calls.
1100 tarstream = self._gitcommand(['archive', revision], stream=True)
1100 tarstream = self._gitcommand(['archive', revision], stream=True)
1101 tar = tarfile.open(fileobj=tarstream, mode='r|')
1101 tar = tarfile.open(fileobj=tarstream, mode='r|')
1102 relpath = subrelpath(self)
1102 relpath = subrelpath(self)
1103 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1103 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1104 for i, info in enumerate(tar):
1104 for i, info in enumerate(tar):
1105 if info.isdir():
1105 if info.isdir():
1106 continue
1106 continue
1107 if info.issym():
1107 if info.issym():
1108 data = info.linkname
1108 data = info.linkname
1109 else:
1109 else:
1110 data = tar.extractfile(info).read()
1110 data = tar.extractfile(info).read()
1111 archiver.addfile(os.path.join(prefix, self._path, info.name),
1111 archiver.addfile(os.path.join(prefix, self._path, info.name),
1112 info.mode, info.issym(), data)
1112 info.mode, info.issym(), data)
1113 ui.progress(_('archiving (%s)') % relpath, i + 1,
1113 ui.progress(_('archiving (%s)') % relpath, i + 1,
1114 unit=_('files'))
1114 unit=_('files'))
1115 ui.progress(_('archiving (%s)') % relpath, None)
1115 ui.progress(_('archiving (%s)') % relpath, None)
1116
1116
1117
1117
1118 def status(self, rev2, **opts):
1118 def status(self, rev2, **opts):
1119 rev1 = self._state[1]
1119 rev1 = self._state[1]
1120 if self._gitmissing() or not rev1:
1120 if self._gitmissing() or not rev1:
1121 # if the repo is missing, return no results
1121 # if the repo is missing, return no results
1122 return [], [], [], [], [], [], []
1122 return [], [], [], [], [], [], []
1123 modified, added, removed = [], [], []
1123 modified, added, removed = [], [], []
1124 self._gitupdatestat()
1124 self._gitupdatestat()
1125 if rev2:
1125 if rev2:
1126 command = ['diff-tree', rev1, rev2]
1126 command = ['diff-tree', rev1, rev2]
1127 else:
1127 else:
1128 command = ['diff-index', rev1]
1128 command = ['diff-index', rev1]
1129 out = self._gitcommand(command)
1129 out = self._gitcommand(command)
1130 for line in out.split('\n'):
1130 for line in out.split('\n'):
1131 tab = line.find('\t')
1131 tab = line.find('\t')
1132 if tab == -1:
1132 if tab == -1:
1133 continue
1133 continue
1134 status, f = line[tab - 1], line[tab + 1:]
1134 status, f = line[tab - 1], line[tab + 1:]
1135 if status == 'M':
1135 if status == 'M':
1136 modified.append(f)
1136 modified.append(f)
1137 elif status == 'A':
1137 elif status == 'A':
1138 added.append(f)
1138 added.append(f)
1139 elif status == 'D':
1139 elif status == 'D':
1140 removed.append(f)
1140 removed.append(f)
1141
1141
1142 deleted = unknown = ignored = clean = []
1142 deleted = unknown = ignored = clean = []
1143 return modified, added, removed, deleted, unknown, ignored, clean
1143 return modified, added, removed, deleted, unknown, ignored, clean
1144
1144
# dispatch table: subrepo kind string -> implementation class
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now