subrepo: print paths relative to upper repo root for push/pull/commit...
Edouard Gomez
r11112:4a9bee61 default
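This changeset makes Mercurial print subrepository paths relative to the root
of the outermost repository for push, pull and commit, instead of the bare
substate key, so a subrepo nested below another subrepo is reported by its
full path from the top-level working directory. In the hunk below, the commit
path now routes the status message through subrepo.relpath(sub).

A minimal sketch of what such a relpath helper can look like, for orientation
only: the subrepo.py side of the changeset is not shown in this view, and the
sketch assumes the subrepo objects of this era expose a backing `_repo`, a
`_path`, and a `_subparent` link from a nested repository to its parent:

    def relpath(sub):
        """return path to this subrepo as seen from outermost repo"""
        if not hasattr(sub, '_repo'):
            # subrepo types without a backing hg repo keep their own path
            return sub._path
        parent = sub._repo
        while hasattr(parent, '_subparent'):
            parent = parent._subparent   # walk up to the outermost repo
        return sub._repo.root[len(parent.root) + 1:]
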
@@ -1,2261 +1,2263
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

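    # Usage sketch (illustrative, assuming an existing localrepository
    # `repo`; not taken from this file):
    #   repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0', False,
    #            'user@example.com', None)     # commits a .hgtags change
    #   repo.tag('wip', repo.lookup('.'), '', True, None, None)
    #                                          # local tag, .hg/localtags only
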
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt


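    # Illustrative contrast of the two calls above: with heads [n1, n2] on
    # 'default', where the tipmost head n2 carries 'close' in its extra,
    #   branchmap()  -> {'default': [n1, n2]}   # all heads, tipmost last
    #   branchtags() -> {'default': n1}         # tipmost open head preferred
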
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

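    # Key forms accepted by lookup above (summary):
    #   repo.lookup(0)       # local revision number
    #   repo.lookup('.')     # first parent of the working directory
    #   repo.lookup('null')  # the null revision
    #   repo.lookup('tip')   # repository tip
    #   repo.lookup('beef')  # full/partial hex node, tag, or branch name
    # Unknown keys raise error.RepoLookupError.
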
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

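    # Registration sketch (illustrative, with a hypothetical 'upper:' filter;
    # extensions would normally do this from reposetup):
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upper)
    # paired with an hgrc rule such as:
    #   [encode]
    #   *.txt = upper:
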
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 args[0], args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               args[0], args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

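    # Note: repo.rollback(dryrun=True) only reports what would be undone;
    # otherwise the last transaction is backed out and the dirstate and
    # branch are restored from the undo.* files saved by transaction().
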
    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

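    # Lock-ordering note: callers needing both locks (e.g. commit and
    # rollback above) acquire wlock() before lock(); taking them in the
    # opposite order risks deadlocks between concurrent hg processes.
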
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
-                   self.ui.status(_('committing subrepository %s\n') % s)
-                   sr = wctx.sub(s).commit(cctx._text, user, date)
+                   sub = wctx.sub(s)
+                   self.ui.status(_('committing subrepository %s\n') %
+                                  subrepo.relpath(sub))
+                   sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

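    # Usage sketch (illustrative, assuming an existing localrepository
    # `repo`): commit everything outstanding, or restrict with a matcher:
    #   node = repo.commit(text='fix bug', user='user@example.com')
    #   m = matchmod.exact(repo.root, '', ['a.txt'])
    #   node = repo.commit(text='commit only a.txt', match=m)
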
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()
            return n
        finally:
            del tr
            lock.release()

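    # Illustration only: commitctx() also accepts an in-memory context, which
    # is how tools like the convert extension create revisions without a
    # working directory. A hedged sketch (the exact memctx/memfilectx
    # signatures may differ between versions, and p1node is hypothetical):
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'contents of %s\n' % path)
    #
    #   c = context.memctx(repo, (p1node, nullid), 'commit message',
    #                      ['a.txt'], getfilectx, user='an@example.com')
    #   node = repo.commitctx(c)
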
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

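    # Sketch of a typical walk() caller; this assumes the usual 'glob:'
    # pattern prefix understood by the match module and the matchmod.match()
    # positional arguments (root, cwd, patterns):
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write(f + '\n')
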
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

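    # Sketch: the seven lists come back in a fixed order, so callers usually
    # unpack the whole tuple (here for the working directory vs. its parent):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
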
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: up to %d MB of RAM may be required "
                                   "to manage this file\n"
                                   "(use 'hg revert %s' to cancel the "
                                   "pending addition)\n")
                                 % (f, 3 * st.st_size // 1000000, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

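    # The one-letter dirstate states tested above (and in remove/copy below)
    # are: 'n' normal/tracked, 'a' added, 'r' removed, 'm' 3-way merged,
    # and '?' untracked.
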
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

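    # between() feeds the binary search in findcommonincoming below: for each
    # (top, bottom) pair it samples the first-parent chain at exponentially
    # growing distances from top (1, 2, 4, 8, ...). For a linear history
    # n0 -> n1 -> ... -> n10 (hypothetical node names):
    #
    #   repo.between([(n10, n0)])  ->  [[n9, n8, n6, n2]]
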
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress(_('searching'), None)
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

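    # Rough shape of the discovery exchange above (a summary, not a wire
    # protocol spec):
    #   1. remote.heads() - collect the remote heads and split them into
    #      locally known and unknown nodes
    #   2. remote.branches() - walk each unknown head down its linear
    #      segment until a known node or a merge/fork is reached
    #   3. remote.between() - binary-search each segment whose base is
    #      known, to pin down the first unknown changeset on it
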
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

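    # Sketch of a caller, assuming the peer was opened with hg.repository()
    # the way the pull command does it (URL is illustrative):
    #
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   repo.pull(other, heads=None, force=False)
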
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        cl = self.changelog
        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        outg, bases, heads = cl.nodesbetween(update, revs)

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1

        if not force and remote_heads != [nullid]:

            def fail_multiple_heads(unsynced, branch=None):
                if branch:
                    msg = _("abort: push creates new remote heads"
                            " on branch '%s'!\n") % branch
                else:
                    msg = _("abort: push creates new remote heads!\n")
                self.ui.warn(msg)
                if unsynced:
                    self.ui.status(_("(you should pull and merge or"
                                     " use push -f to force)\n"))
                else:
                    self.ui.status(_("(did you forget to merge?"
                                     " use push -f to force)\n"))
                return None, 0

            if remote.capable('branchmap'):
                # Check for each named branch if we're creating new remote heads.
                # To be a remote head after push, node must be either:
                # - unknown locally
                # - a local outgoing head descended from update
                # - a remote head that's known locally and not
                #   ancestral to an outgoing head
                #
                # New named branches cannot be created without --force.

                # 1. Create set of branches involved in the push.
                branches = set(self[n].branch() for n in outg)

                # 2. Check for new branches on the remote.
                remotemap = remote.branchmap()
                newbranches = branches - set(remotemap)
                if newbranches: # new branch requires --force
                    branchnames = ', '.join("%s" % b for b in newbranches)
                    self.ui.warn(_("abort: push creates "
                                   "new remote branches: %s!\n")
                                 % branchnames)
                    self.ui.status(_("(use 'hg push -f' to force)\n"))
                    return None, 0

                # 3. Construct the initial oldmap and newmap dicts.
                # They contain information about the remote heads before and
                # after the push, respectively.
                # Heads not found locally are not included in either dict,
                # since they won't be affected by the push.
                # unsynced contains all branches with incoming changesets.
                oldmap = {}
                newmap = {}
                unsynced = set()
                for branch in branches:
                    remoteheads = remotemap[branch]
                    prunedheads = [h for h in remoteheads if h in cl.nodemap]
                    oldmap[branch] = prunedheads
                    newmap[branch] = list(prunedheads)
                    if len(remoteheads) > len(prunedheads):
                        unsynced.add(branch)

                # 4. Update newmap with outgoing changes.
                # This will possibly add new heads and remove existing ones.
                ctxgen = (self[n] for n in outg)
                self._updatebranchcache(newmap, ctxgen)

                # 5. Check for new heads.
                # If there are more heads after the push than before, a suitable
                # warning, depending on unsynced status, is displayed.
                for branch in branches:
                    if len(newmap[branch]) > len(oldmap[branch]):
                        return fail_multiple_heads(branch in unsynced, branch)

                # 6. Check for unsynced changes on involved branches.
                if unsynced:
                    self.ui.warn(_("note: unsynced remote changes!\n"))

            else:
                # Old servers: Check for new topological heads.
                # Code based on _updatebranchcache.
                newheads = set(h for h in remote_heads if h in cl.nodemap)
                oldheadcnt = len(newheads)
                newheads.update(outg)
                if len(newheads) > 1:
                    for latest in reversed(outg):
                        if latest not in newheads:
                            continue
                        minhrev = min(cl.rev(h) for h in newheads)
                        reachable = cl.reachable(latest, cl.node(minhrev))
                        reachable.remove(latest)
                        newheads.difference_update(reachable)
                if len(newheads) > oldheadcnt:
                    return fail_multiple_heads(inc)
                if inc:
                    self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1673 def changegroupsubset(self, bases, heads, source, extranodes=None):
1675 def changegroupsubset(self, bases, heads, source, extranodes=None):
1674 """Compute a changegroup consisting of all the nodes that are
1676 """Compute a changegroup consisting of all the nodes that are
1675 descendents of any of the bases and ancestors of any of the heads.
1677 descendents of any of the bases and ancestors of any of the heads.
1676 Return a chunkbuffer object whose read() method will return
1678 Return a chunkbuffer object whose read() method will return
1677 successive changegroup chunks.
1679 successive changegroup chunks.
1678
1680
1679 It is fairly complex as determining which filenodes and which
1681 It is fairly complex as determining which filenodes and which
1680 manifest nodes need to be included for the changeset to be complete
1682 manifest nodes need to be included for the changeset to be complete
1681 is non-trivial.
1683 is non-trivial.
1682
1684
1683 Another wrinkle is doing the reverse, figuring out which changeset in
1685 Another wrinkle is doing the reverse, figuring out which changeset in
1684 the changegroup a particular filenode or manifestnode belongs to.
1686 the changegroup a particular filenode or manifestnode belongs to.
1685
1687
1686 The caller can specify some nodes that must be included in the
1688 The caller can specify some nodes that must be included in the
1687 changegroup using the extranodes argument. It should be a dict
1689 changegroup using the extranodes argument. It should be a dict
1688 where the keys are the filenames (or 1 for the manifest), and the
1690 where the keys are the filenames (or 1 for the manifest), and the
1689 values are lists of (node, linknode) tuples, where node is a wanted
1691 values are lists of (node, linknode) tuples, where node is a wanted
1690 node and linknode is the changelog node that should be transmitted as
1692 node and linknode is the changelog node that should be transmitted as
1691 the linkrev.
1693 the linkrev.
1692 """
1694 """
1693
1695
1694 # Set up some initial variables
1696 # Set up some initial variables
1695 # Make it easy to refer to self.changelog
1697 # Make it easy to refer to self.changelog
1696 cl = self.changelog
1698 cl = self.changelog
1697 # msng is short for missing - compute the list of changesets in this
1699 # msng is short for missing - compute the list of changesets in this
1698 # changegroup.
1700 # changegroup.
1699 if not bases:
1701 if not bases:
1700 bases = [nullid]
1702 bases = [nullid]
1701 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1703 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1702
1704
1703 if extranodes is None:
1705 if extranodes is None:
1704 # can we go through the fast path ?
1706 # can we go through the fast path ?
1705 heads.sort()
1707 heads.sort()
1706 allheads = self.heads()
1708 allheads = self.heads()
1707 allheads.sort()
1709 allheads.sort()
1708 if heads == allheads:
1710 if heads == allheads:
1709 return self._changegroup(msng_cl_lst, source)
1711 return self._changegroup(msng_cl_lst, source)
1710
1712
1711 # slow path
1713 # slow path
1712 self.hook('preoutgoing', throw=True, source=source)
1714 self.hook('preoutgoing', throw=True, source=source)
1713
1715
1714 self.changegroupinfo(msng_cl_lst, source)
1716 self.changegroupinfo(msng_cl_lst, source)
1715 # Some bases may turn out to be superfluous, and some heads may be
1717 # Some bases may turn out to be superfluous, and some heads may be
1716 # too. nodesbetween will return the minimal set of bases and heads
1718 # too. nodesbetween will return the minimal set of bases and heads
1717 # necessary to re-create the changegroup.
1719 # necessary to re-create the changegroup.
1718
1720
1719 # Known heads are the list of heads that it is assumed the recipient
1721 # Known heads are the list of heads that it is assumed the recipient
1720 # of this changegroup will know about.
1722 # of this changegroup will know about.
1721 knownheads = set()
1723 knownheads = set()
1722 # We assume that all parents of bases are known heads.
1724 # We assume that all parents of bases are known heads.
1723 for n in bases:
1725 for n in bases:
1724 knownheads.update(cl.parents(n))
1726 knownheads.update(cl.parents(n))
1725 knownheads.discard(nullid)
1727 knownheads.discard(nullid)
1726 knownheads = list(knownheads)
1728 knownheads = list(knownheads)
1727 if knownheads:
1729 if knownheads:
1728 # Now that we know what heads are known, we can compute which
1730 # Now that we know what heads are known, we can compute which
1729 # changesets are known. The recipient must know about all
1731 # changesets are known. The recipient must know about all
1730 # changesets required to reach the known heads from the null
1732 # changesets required to reach the known heads from the null
1731 # changeset.
1733 # changeset.
1732 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1734 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1733 junk = None
1735 junk = None
1734 # Transform the list into a set.
1736 # Transform the list into a set.
1735 has_cl_set = set(has_cl_set)
1737 has_cl_set = set(has_cl_set)
1736 else:
1738 else:
1737 # If there were no known heads, the recipient cannot be assumed to
1739 # If there were no known heads, the recipient cannot be assumed to
1738 # know about any changesets.
1740 # know about any changesets.
1739 has_cl_set = set()
1741 has_cl_set = set()
1740
1742
1741 # Make it easy to refer to self.manifest
1743 # Make it easy to refer to self.manifest
1742 mnfst = self.manifest
1744 mnfst = self.manifest
1743 # We don't know which manifests are missing yet
1745 # We don't know which manifests are missing yet
1744 msng_mnfst_set = {}
1746 msng_mnfst_set = {}
1745 # Nor do we know which filenodes are missing.
1747 # Nor do we know which filenodes are missing.
1746 msng_filenode_set = {}
1748 msng_filenode_set = {}
1747
1749
1748 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1750 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1749 junk = None
1751 junk = None
1750
1752
1751 # A changeset always belongs to itself, so the changenode lookup
1753 # A changeset always belongs to itself, so the changenode lookup
1752 # function for a changenode is identity.
1754 # function for a changenode is identity.
1753 def identity(x):
1755 def identity(x):
1754 return x
1756 return x
1755
1757
1756 # If we determine that a particular file or manifest node must be a
1758 # If we determine that a particular file or manifest node must be a
1757 # node that the recipient of the changegroup will already have, we can
1759 # node that the recipient of the changegroup will already have, we can
1758 # also assume the recipient will have all the parents. This function
1760 # also assume the recipient will have all the parents. This function
1759 # prunes them from the set of missing nodes.
1761 # prunes them from the set of missing nodes.
1760 def prune_parents(revlog, hasset, msngset):
1762 def prune_parents(revlog, hasset, msngset):
1761 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1763 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1762 msngset.pop(revlog.node(r), None)
1764 msngset.pop(revlog.node(r), None)
1763
1765
1764 # Use the information collected in collect_manifests_and_files to say
1766 # Use the information collected in collect_manifests_and_files to say
1765 # which changenode any manifestnode belongs to.
1767 # which changenode any manifestnode belongs to.
1766 def lookup_manifest_link(mnfstnode):
1768 def lookup_manifest_link(mnfstnode):
1767 return msng_mnfst_set[mnfstnode]
1769 return msng_mnfst_set[mnfstnode]
1768
1770
1769 # A function generating function that sets up the initial environment
1771 # A function generating function that sets up the initial environment
1770 # the inner function.
1772 # the inner function.
1771 def filenode_collector(changedfiles):
1773 def filenode_collector(changedfiles):
1772 # This gathers information from each manifestnode included in the
1774 # This gathers information from each manifestnode included in the
1773 # changegroup about which filenodes the manifest node references
1775 # changegroup about which filenodes the manifest node references
1774 # so we can include those in the changegroup too.
1776 # so we can include those in the changegroup too.
1775 #
1777 #
1776 # It also remembers which changenode each filenode belongs to. It
1778 # It also remembers which changenode each filenode belongs to. It
1777 # does this by assuming the a filenode belongs to the changenode
1779 # does this by assuming the a filenode belongs to the changenode
1778 # the first manifest that references it belongs to.
1780 # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundling changes'), None)


            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

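Both generator pipelines above emit one flat byte stream built from length-prefixed chunks: changegroup.chunkheader() frames a payload, and changegroup.closechunk() emits an empty chunk that terminates a group. A minimal sketch of how a consumer could walk that framing, assuming the header is a 4-byte big-endian length that counts itself (read_chunks is a hypothetical helper, not part of this module):

    import struct

    def read_chunks(fp):
        # Yield chunk payloads until the empty, terminating chunk is seen.
        while True:
            l = struct.unpack(">l", fp.read(4))[0]
            if l <= 4:
                break  # closechunk(): a zero-length chunk ends the group
            yield fp.read(l - 4)  # payload excludes the 4 header bytes
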
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


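The return convention above packs two facts into one integer: whether anything happened, and how the head count moved. A hedged sketch of decoding it on the caller side (describe_addchangegroup is illustrative, not part of this API):

    def describe_addchangegroup(ret):
        # 0: nothing changed, or there was no source
        if ret == 0:
            return 'no changes'
        # positive: ret - 1 heads were added (ret == 1: head count unchanged)
        if ret > 0:
            return '%d head(s) added' % (ret - 1)
        # negative: -ret - 1 heads were removed
        return '%d head(s) removed' % (-ret - 1)
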
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

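Read top to bottom, stream_in spells out the wire format of a streaming clone: one status-code line, one "total_files total_bytes" line, then for each file a name, a NUL byte, and a size line followed by exactly that many raw bytes. A hedged illustration of a well-formed stream (file names, sizes, and payload bytes invented):

    example = ('0\n'                    # status: 0 == OK
               '2 23\n'                 # total_files, total_bytes
               'data/foo.i\x0011\n' + 'x' * 11 +  # name NUL size, raw bytes
               'data/bar.i\x0012\n' + 'y' * 12)
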
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,380 +1,388
# subrepo.py - sub-repository handling for Mercurial
#
# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
from i18n import _
import config, util, node, error
hg = None

nullstate = ('', '', 'empty')

def state(ctx):
    p = config.config()
    def read(f, sections=None, remap=None):
        if f in ctx:
            p.parse(f, ctx[f].data(), sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for l in ctx['.hgsubstate'].data().splitlines():
                revision, path = l.split(" ", 1)
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
        state[path] = (src.strip(), rev.get(path, ''), kind)

    return state

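For orientation, a hedged example of the two files state() parses (the vendor path and URL are invented): .hgsub maps a working-directory path to a source, optionally tagged with a [kind] prefix, and .hgsubstate pins each path to a revision. With a .hgsub of

    sub1 = ../sub1
    vendor = [svn]https://example.com/svn/vendor

and a .hgsubstate of

    fc3b4ce2696f7741438c79207583768f2ce6b0dd sub1
    12 vendor

state(ctx) would return

    {'sub1': ('../sub1', 'fc3b4ce2696f7741438c79207583768f2ce6b0dd', 'hg'),
     'vendor': ('https://example.com/svn/vendor', '12', 'svn')}
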
def writestate(repo, state):
    repo.wwrite('.hgsubstate',
                ''.join(['%s %s\n' % (state[s][1], s)
                         for s in sorted(state)]), '')

def submerge(repo, wctx, mctx, actx):
    # working context, merging context, ancestor context
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    for s, l in s1.items():
        if wctx != actx and wctx.sub(s).dirty():
            l = (l[0], l[1] + "+")
        a = sa.get(s, nullstate)
        if s in s2:
            r = s2[s]
            if l == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif l == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r)
                sm[s] = r
            elif l[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?')
                    % (s, l[0], r[0]),
                    (_('&Local'), _('&Remote')), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r)
                    sm[s] = r
            elif l[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r)
                sm[s] = r
            else:
                debug(s, "both sides changed, merge with", r)
                wctx.sub(s).merge(r)
                sm[s] = l
        elif l == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    for s, r in s2.items():
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)

+def relpath(sub):
+    if not hasattr(sub, '_repo'):
+        return sub._path
+    parent = sub._repo
+    while hasattr(parent, '_subparent'):
+        parent = parent._subparent
+    return sub._repo.root[len(parent.root)+1:]
+
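relpath, added by this change, walks the _subparent chain up to the outermost repository and strips that root prefix, so messages name a subrepo by its path under the top-level working directory rather than by a repo-local path. A hedged worked example (paths invented):

    # top-level repo rooted at /work/main, nested subrepo sub1/sub2:
    #   sub._repo.root == '/work/main/sub1/sub2'
    #   parent chain:  sub1/sub2 -> sub1 -> main (no _subparent)
    # relpath(sub) == '/work/main/sub1/sub2'[len('/work/main') + 1:]
    #              == 'sub1/sub2'
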
def _abssource(repo, push=False):
    if hasattr(repo, '_subparent'):
        source = repo._subsource
        if source.startswith('/') or '://' in source:
            return source
        parent = _abssource(repo._subparent, push)
        if '://' in parent:
            if parent[-1] == '/':
                parent = parent[:-1]
            r = urlparse.urlparse(parent + '/' + source)
            r = urlparse.urlunparse((r[0], r[1],
                                     posixpath.normpath(r.path),
                                     r[3], r[4], r[5]))
            return r
        return posixpath.normpath(os.path.join(parent, repo._subsource))
    if push and repo.ui.config('paths', 'default-push'):
        return repo.ui.config('paths', 'default-push', repo.root)
    return repo.ui.config('paths', 'default', repo.root)

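A relative subrepo source is resolved against wherever the parent repository itself came from, recursively. A hedged trace with an invented URL: if the parent's paths.default is http://example.com/main and the subrepo source is ../sub1, then

    # parent = 'http://example.com/main'; source = '../sub1'
    # urlparse('http://example.com/main/../sub1').path == '/main/../sub1'
    # posixpath.normpath('/main/../sub1')              == '/sub1'
    # result: 'http://example.com/sub1'
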
def subrepo(ctx, path):
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    util.path_auditor(ctx._repo.root)(path)
    state = ctx.substate.get(path, nullstate)
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])

# subrepo classes need to implement the following methods:
# __init__(self, ctx, path, state)
# dirty(self): returns true if the dirstate of the subrepo
#   does not match current stored state
# commit(self, text, user, date): commit the current changes
#   to the subrepo with the given log message. Use given
#   user and date if possible. Return the new state of the subrepo.
# remove(self): remove the subrepo (should verify the dirstate
#   is not dirty first)
# get(self, state): run whatever commands are needed to put the
#   subrepo into this state
# merge(self, state): merge currently-saved state with the new state.
# push(self, force): perform whatever action is analogous to 'hg push'
#   This may be a no-op on some systems.

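A minimal sketch of a new subrepo type against this interface (nullsubrepo is hypothetical and deliberately does nothing; the real types follow below):

    class nullsubrepo(object):
        '''do-nothing subrepo type, useful only as an interface outline'''
        def __init__(self, ctx, path, state):
            self._path = path
            self._state = state  # (source, revision) pair
        def dirty(self):
            return False  # never differs from the recorded state
        def commit(self, text, user, date):
            return self._state[1]  # nothing to commit, state unchanged
        def remove(self):
            pass
        def get(self, state):
            pass
        def merge(self, state):
            pass
        def push(self, force):
            return True  # pretend the push succeeded
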
class hgsubrepo(object):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.ui, root, create=create)
        self._repo._subparent = r
        self._repo._subsource = state[0]

        if create:
            fp = self._repo.opener("hgrc", "w", text=True)
            fp.write('[paths]\n')

            def addpathconfig(key, value):
                fp.write('%s = %s\n' % (key, value))
                self._repo.ui.setconfig('paths', key, value)

            defpath = _abssource(self._repo)
            defpushpath = _abssource(self._repo, True)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp.close()

    def dirty(self):
        r = self._state[1]
        if r == '':
            return True
        w = self._repo[None]
        if w.p1() != self._repo[r]: # version checked out changed
            return True
        return w.dirty() # working directory changed

    def commit(self, text, user, date):
-       self._repo.ui.debug("committing subrepo %s\n" % self._path)
+       self._repo.ui.debug("committing subrepo %s\n" % relpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
-       self._repo.ui.note(_('removing subrepo %s\n') % self._path)
+       self._repo.ui.note(_('removing subrepo %s\n') % relpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        source, revision, kind = state
        try:
            self._repo.lookup(revision)
        except error.RepoError:
            self._repo._subsource = source
            srcurl = _abssource(self._repo)
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
-                                % (self._path, srcurl))
+                                % (relpath(self), srcurl))
            other = hg.repository(self._repo.ui, srcurl)
            self._repo.pull(other)

    def get(self, state):
        self._get(state)
        source, revision, kind = state
        self._repo.ui.debug("getting subrepo %s\n" % self._path)
        hg.clean(self._repo, revision, False)

    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)
        if anc == cur:
-           self._repo.ui.debug("updating subrepo %s\n" % self._path)
+           self._repo.ui.debug("updating subrepo %s\n" % relpath(self))
            hg.update(self._repo, state[1])
        elif anc == dst:
-           self._repo.ui.debug("skipping subrepo %s\n" % self._path)
+           self._repo.ui.debug("skipping subrepo %s\n" % relpath(self))
        else:
-           self._repo.ui.debug("merging subrepo %s\n" % self._path)
+           self._repo.ui.debug("merging subrepo %s\n" % relpath(self))
            hg.merge(self._repo, state[1], remind=False)

    def push(self, force):
        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if not c.sub(s).push(force):
                return False

        dsturl = _abssource(self._repo, True)
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
-                            (self._path, dsturl))
+                            (relpath(self), dsturl))
        other = hg.repository(self._repo.ui, dsturl)
        return self._repo.push(other, force)

class svnsubrepo(object):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui

    def _svncommand(self, commands):
        path = os.path.join(self._ctx._repo.origroot, self._path)
        cmd = ['svn'] + commands + [path]
        cmd = [util.shellquote(arg) for arg in cmd]
        cmd = util.quotecommand(' '.join(cmd))
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        env['LC_MESSAGES'] = 'C'
        write, read, err = util.popen3(cmd, env=env, newlines=True)
        retdata = read.read()
        err = err.read().strip()
        if err:
            raise util.Abort(err)
        return retdata

    def _wcrev(self):
        output = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        if not entries:
            return 0
        return int(entries[0].getAttribute('revision') or 0)

    def _wcchanged(self):
        """Return (changes, extchanges) where changes is True
        if the working directory was changed, and extchanges is
        True if any of these changes concern an external entry.
        """
        output = self._svncommand(['status', '--xml'])
        externals, changes = [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True
        return bool(changes), False

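A hedged sample of the svn status --xml document that _wcchanged walks (element layout as produced by stock Subversion; paths invented). Here vendor is an external and vendor/lib.c is modified inside it, so the method would return (True, True):

    <status>
      <target path=".">
        <entry path="vendor">
          <wc-status item="external" props="none"/>
        </entry>
        <entry path="vendor/lib.c">
          <wc-status item="modified" props="none"/>
        </entry>
      </target>
    </status>
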
    def dirty(self):
        if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
            return False
        return True

    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged = self._wcchanged()
        if not changed:
            return self._wcrev()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        commitinfo = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([\d]+).', commitinfo)
        if not newrev:
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev]))
        return newrev

    def remove(self):
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)
        shutil.rmtree(self._ctx._repo.join(self._path))

    def get(self, state):
        status = self._svncommand(['checkout', state[0], '--revision', state[1]])
        if not re.search('Checked out revision [\d]+.', status):
            raise util.Abort(status.splitlines()[-1])
        self._ui.status(status)

    def merge(self, state):
        old = int(self._state[1])
        new = int(state[1])
        if new > old:
            self.get(state)

    def push(self, force):
        # nothing for svn
        pass

types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    }
@@ -1,64 +1,64
% Preparing the subrepository sub2
adding sub2/sub2
% Preparing the sub1 repo which depends on the subrepo sub2
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding sub1/.hgsub
adding sub1/sub1
committing subrepository sub2
% Preparing the main repo which depends on the subrepo sub1
updating to branch default
pulling ...sub2
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding main/.hgsub
adding main/main
committing subrepository sub1
% Cleaning both repositories, just as a clone -U
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
0 files updated, 0 files merged, 3 files removed, 0 files unresolved
0 files updated, 0 files merged, 3 files removed, 0 files unresolved
% Clone main
updating to branch default
pulling ...sub1
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 3 changes to 3 files
pulling ...sub2
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
% Checking cloned repo ids
cloned 7f491f53a367 tip
cloned/sub1 fc3b4ce2696f tip
cloned/sub1/sub2 c57a0840e3ba tip
% debugsub output for main and sub1
path sub1
 source   ../sub1
 revision fc3b4ce2696f7741438c79207583768f2ce6b0dd
path sub2
 source   ../sub2
 revision c57a0840e3badd667ef3c3ef65471609acb2ba3c
% Modifying deeply nested sub2
committing subrepository sub1
-committing subrepository sub2
+committing subrepository sub1/sub2
% Checking modified node ids
cloned ffe6649062fe tip
cloned/sub1 2ecb03bf44a9 tip
cloned/sub1/sub2 53dd3430bcaf tip
% debugsub output for main and sub1
path sub1
 source   ../sub1
 revision 2ecb03bf44a94e749e8669481dd9069526ce7cb9
path sub2
 source   ../sub2
 revision 53dd3430bcaf5ab4a7c48262bcad6d441f510487
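The one changed line in this expected output ('committing subrepository sub1/sub2' where plain 'sub2' was printed before) is the visible effect of this changeset: a nested subrepo is now reported by its path relative to the top-level repository instead of relative to its immediate parent. A rough sketch of the prefixing idea, with a hypothetical helper name rather than the actual function the changeset adds to subrepo.py:

    import posixpath

    def nested_subrepo_path(parent_prefix, subpath):
        # Prepend the parent repo's own path (relative to the outermost
        # repository) so deeply nested subrepos print as 'sub1/sub2'
        # instead of the ambiguous bare 'sub2'.
        if not parent_prefix:
            return subpath
        return posixpath.join(parent_prefix, subpath)

    print(nested_subrepo_path('sub1', 'sub2'))  # prints: sub1/sub2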
@@ -1,295 +1,295 @@
% first revision, no sub
adding a
% add first sub
adding a
parent: 0:f7b1eb17ad24 tip
 0
branch: default
commit: 1 added, 1 subrepos
update: (current)
committing subrepository s
% add sub sub
parent: 1:7cf8cfea66e4 tip
 1
branch: default
commit: 1 subrepos
update: (current)
committing subrepository s
committing subrepository ss
parent: 2:df30734270ae tip
 2
branch: default
commit: (clean)
update: (current)
% bump sub rev
committing subrepository s
% leave sub dirty
committing subrepository s
changeset:   3:1c833a7a9e3a
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     4

% check caching
0 files updated, 0 files merged, 2 files removed, 0 files unresolved
% restore
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
path s
 source   s
 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
% new branch for merge tests
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding t/t
% 5
committing subrepository t
created new head
% 6
committing subrepository t
path s
 source   s
 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
path t
 source   t
 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
% 7
committing subrepository t
% 8
% merge tests
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
path s
 source   s
 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
path t
 source   t
 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
created new head
searching for copies back to rev 2
resolving manifests
 overwrite None partial False
 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
 .hgsubstate: versions differ -> m
update: .hgsubstate 1/1 files (100.00%)
subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
  subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
getting subrepo t
resolving manifests
 overwrite True partial False
 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
 t: remote is newer -> g
update: t 1/1 files (100.00%)
getting t
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
path s
 source   s
 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
path t
 source   t
 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
committing subrepository t
searching for copies back to rev 2
resolving manifests
 overwrite None partial False
 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
 .hgsubstate: versions differ -> m
update: .hgsubstate 1/1 files (100.00%)
subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
  subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
merging subrepo t
searching for copies back to rev 2
resolving manifests
 overwrite None partial False
 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
 t: versions differ -> m
preserving t for resolve of t
update: t 1/1 files (100.00%)
picked tool 'internal:merge' for t (binary False symlink False)
merging t
my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
warning: conflicts during merge.
merging t failed!
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
% should conflict
<<<<<<< local
conflict
=======
t3
>>>>>>> other
% clone
updating to branch default
pulling subrepo s from .../sub/t/s
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 5 changes to 3 files
-pulling subrepo ss from .../sub/t/s/ss
+pulling subrepo s/ss from .../sub/t/s/ss
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
pulling subrepo t from .../sub/t/t
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 1 files (+1 heads)
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
path s
 source   s
 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
path t
 source   t
 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
% push
committing subrepository t
pushing ...sub/t
pushing ...sub/t/s/ss
searching for changes
no changes found
pushing ...sub/t/s
searching for changes
no changes found
pushing ...sub/t/t
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
% push -f
committing subrepository s
abort: push creates new remote heads on branch 'default'!
pushing ...sub/t
pushing ...sub/t/s/ss
searching for changes
no changes found
pushing ...sub/t/s
searching for changes
(did you forget to merge? use push -f to force)
pushing ...sub/t
pushing ...sub/t/s/ss
searching for changes
no changes found
pushing ...sub/t/s
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
pushing ...sub/t/t
searching for changes
no changes found
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
% update
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
committing subrepository t
% pull
pulling ...sub/t
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
(run 'hg update' to get a working copy)
pulling subrepo t from .../sub/t/t
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
blah
% bogus subrepo path aborts
abort: missing ] in subrepo source
% issue 1986
adding a
marked working directory as branch br
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b
created new head
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding c
created new head
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
adding .hgsub
committing subrepository s
marked working directory as branch br
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b
committing subrepository s
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
adding c
created new head
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding d
committing subrepository s
created new head
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding e
committing subrepository s
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
% test subrepo delete from .hgsubstate
adding testdelete/nested/foo
adding testdelete/nested2/foo
adding testdelete/.hgsub
committing subrepository nested2
committing subrepository nested
nested
% test repository cloning
adding nested_absolute/foo
adding nested_relative/foo2
adding main/.hgsub
committing subrepository nested_relative
committing subrepository nested_absolute
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
[paths]
default = $HGTMP/test-subrepo/sub/mercurial/nested_absolute
[paths]
default = $HGTMP/test-subrepo/sub/mercurial/nested_relative
% issue 1977
adding a
adding .hgsub
committing subrepository s
updating to branch default
pulling subrepo s from .../sub/repo/s
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
created new head
committing subrepository s
abort: push creates new remote heads on branch 'default'!
0 files updated, 0 files merged, 0 files removed, 0 files unresolved