prepush: add more precise error messages...
Benoit Boissinot
r10396:65a90c8e default
@@ -1,2133 +1,2142 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
325
326 return partial
326 return partial
327
327
328 def branchmap(self):
328 def branchmap(self):
329 '''returns a dictionary {branch: [branchheads]}'''
329 '''returns a dictionary {branch: [branchheads]}'''
330 tip = self.changelog.tip()
330 tip = self.changelog.tip()
331 if self._branchcache is not None and self._branchcachetip == tip:
331 if self._branchcache is not None and self._branchcachetip == tip:
332 return self._branchcache
332 return self._branchcache
333
333
334 oldtip = self._branchcachetip
334 oldtip = self._branchcachetip
335 self._branchcachetip = tip
335 self._branchcachetip = tip
336 if oldtip is None or oldtip not in self.changelog.nodemap:
336 if oldtip is None or oldtip not in self.changelog.nodemap:
337 partial, last, lrev = self._readbranchcache()
337 partial, last, lrev = self._readbranchcache()
338 else:
338 else:
339 lrev = self.changelog.rev(oldtip)
339 lrev = self.changelog.rev(oldtip)
340 partial = self._branchcache
340 partial = self._branchcache
341
341
342 self._branchtags(partial, lrev)
342 self._branchtags(partial, lrev)
343 # this private cache holds all heads (not just tips)
343 # this private cache holds all heads (not just tips)
344 self._branchcache = partial
344 self._branchcache = partial
345
345
346 return self._branchcache
346 return self._branchcache
347
347
348 def branchtags(self):
348 def branchtags(self):
349 '''return a dict where branch names map to the tipmost head of
349 '''return a dict where branch names map to the tipmost head of
350 the branch, open heads come before closed'''
350 the branch, open heads come before closed'''
351 bt = {}
351 bt = {}
352 for bn, heads in self.branchmap().iteritems():
352 for bn, heads in self.branchmap().iteritems():
353 tip = heads[-1]
353 tip = heads[-1]
354 for h in reversed(heads):
354 for h in reversed(heads):
355 if 'close' not in self.changelog.read(h)[5]:
355 if 'close' not in self.changelog.read(h)[5]:
356 tip = h
356 tip = h
357 break
357 break
358 bt[bn] = tip
358 bt[bn] = tip
359 return bt
359 return bt
360
360
361
361
362 def _readbranchcache(self):
362 def _readbranchcache(self):
363 partial = {}
363 partial = {}
364 try:
364 try:
365 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
366 lines = f.read().split('\n')
366 lines = f.read().split('\n')
367 f.close()
367 f.close()
368 except (IOError, OSError):
368 except (IOError, OSError):
369 return {}, nullid, nullrev
369 return {}, nullid, nullrev
370
370
371 try:
371 try:
372 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
374 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
375 # invalidate the cache
375 # invalidate the cache
376 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
377 for l in lines:
377 for l in lines:
378 if not l:
378 if not l:
379 continue
379 continue
380 node, label = l.split(" ", 1)
380 node, label = l.split(" ", 1)
381 partial.setdefault(label.strip(), []).append(bin(node))
381 partial.setdefault(label.strip(), []).append(bin(node))
382 except KeyboardInterrupt:
382 except KeyboardInterrupt:
383 raise
383 raise
384 except Exception, inst:
384 except Exception, inst:
385 if self.ui.debugflag:
385 if self.ui.debugflag:
386 self.ui.warn(str(inst), '\n')
386 self.ui.warn(str(inst), '\n')
387 partial, last, lrev = {}, nullid, nullrev
387 partial, last, lrev = {}, nullid, nullrev
388 return partial, last, lrev
388 return partial, last, lrev
389
389
390 def _writebranchcache(self, branches, tip, tiprev):
390 def _writebranchcache(self, branches, tip, tiprev):
391 try:
391 try:
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f.write("%s %s\n" % (hex(tip), tiprev))
393 f.write("%s %s\n" % (hex(tip), tiprev))
394 for label, nodes in branches.iteritems():
394 for label, nodes in branches.iteritems():
395 for node in nodes:
395 for node in nodes:
396 f.write("%s %s\n" % (hex(node), label))
396 f.write("%s %s\n" % (hex(node), label))
397 f.rename()
397 f.rename()
398 except (IOError, OSError):
398 except (IOError, OSError):
399 pass
399 pass
400
400
401 def _updatebranchcache(self, partial, start, end):
401 def _updatebranchcache(self, partial, start, end):
402 # collect new branch entries
402 # collect new branch entries
403 newbranches = {}
403 newbranches = {}
404 for r in xrange(start, end):
404 for r in xrange(start, end):
405 c = self[r]
405 c = self[r]
406 newbranches.setdefault(c.branch(), []).append(c.node())
406 newbranches.setdefault(c.branch(), []).append(c.node())
407 # if older branchheads are reachable from new ones, they aren't
407 # if older branchheads are reachable from new ones, they aren't
408 # really branchheads. Note checking parents is insufficient:
408 # really branchheads. Note checking parents is insufficient:
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
410 for branch, newnodes in newbranches.iteritems():
410 for branch, newnodes in newbranches.iteritems():
411 bheads = partial.setdefault(branch, [])
411 bheads = partial.setdefault(branch, [])
412 bheads.extend(newnodes)
412 bheads.extend(newnodes)
413 if len(bheads) < 2:
413 if len(bheads) < 2:
414 continue
414 continue
415 newbheads = []
415 newbheads = []
416 # starting from tip means fewer passes over reachable
416 # starting from tip means fewer passes over reachable
417 while newnodes:
417 while newnodes:
418 latest = newnodes.pop()
418 latest = newnodes.pop()
419 if latest not in bheads:
419 if latest not in bheads:
420 continue
420 continue
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
422 reachable = self.changelog.reachable(latest, minbhrev)
422 reachable = self.changelog.reachable(latest, minbhrev)
423 bheads = [b for b in bheads if b not in reachable]
423 bheads = [b for b in bheads if b not in reachable]
424 newbheads.insert(0, latest)
424 newbheads.insert(0, latest)
425 bheads.extend(newbheads)
425 bheads.extend(newbheads)
426 partial[branch] = bheads
426 partial[branch] = bheads
427
427
428 def lookup(self, key):
428 def lookup(self, key):
429 if isinstance(key, int):
429 if isinstance(key, int):
430 return self.changelog.node(key)
430 return self.changelog.node(key)
431 elif key == '.':
431 elif key == '.':
432 return self.dirstate.parents()[0]
432 return self.dirstate.parents()[0]
433 elif key == 'null':
433 elif key == 'null':
434 return nullid
434 return nullid
435 elif key == 'tip':
435 elif key == 'tip':
436 return self.changelog.tip()
436 return self.changelog.tip()
437 n = self.changelog._match(key)
437 n = self.changelog._match(key)
438 if n:
438 if n:
439 return n
439 return n
440 if key in self.tags():
440 if key in self.tags():
441 return self.tags()[key]
441 return self.tags()[key]
442 if key in self.branchtags():
442 if key in self.branchtags():
443 return self.branchtags()[key]
443 return self.branchtags()[key]
444 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
445 if n:
445 if n:
446 return n
446 return n
447
447
448 # can't find key, check if it might have come from damaged dirstate
448 # can't find key, check if it might have come from damaged dirstate
449 if key in self.dirstate.parents():
449 if key in self.dirstate.parents():
450 raise error.Abort(_("working directory has unknown parent '%s'!")
450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 % short(key))
451 % short(key))
452 try:
452 try:
453 if len(key) == 20:
453 if len(key) == 20:
454 key = hex(key)
454 key = hex(key)
455 except:
455 except:
456 pass
456 pass
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def wjoin(self, f):
465 def wjoin(self, f):
466 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
467
467
468 def rjoin(self, f):
468 def rjoin(self, f):
469 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
470
470
471 def file(self, f):
471 def file(self, f):
472 if f[0] == '/':
472 if f[0] == '/':
473 f = f[1:]
473 f = f[1:]
474 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
475
475
476 def changectx(self, changeid):
476 def changectx(self, changeid):
477 return self[changeid]
477 return self[changeid]
478
478
479 def parents(self, changeid=None):
479 def parents(self, changeid=None):
480 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
481 return self[changeid].parents()
481 return self[changeid].parents()
482
482
483 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
487
487
488 def getcwd(self):
488 def getcwd(self):
489 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
490
490
491 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
493
493
494 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
495 return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
500 def _filter(self, filter, filename, data):
500 def _filter(self, filter, filename, data):
501 if filter not in self.filterpats:
501 if filter not in self.filterpats:
502 l = []
502 l = []
503 for pat, cmd in self.ui.configitems(filter):
503 for pat, cmd in self.ui.configitems(filter):
504 if cmd == '!':
504 if cmd == '!':
505 continue
505 continue
506 mf = match_.match(self.root, '', [pat])
506 mf = match_.match(self.root, '', [pat])
507 fn = None
507 fn = None
508 params = cmd
508 params = cmd
509 for name, filterfn in self._datafilters.iteritems():
509 for name, filterfn in self._datafilters.iteritems():
510 if cmd.startswith(name):
510 if cmd.startswith(name):
511 fn = filterfn
511 fn = filterfn
512 params = cmd[len(name):].lstrip()
512 params = cmd[len(name):].lstrip()
513 break
513 break
514 if not fn:
514 if not fn:
515 fn = lambda s, c, **kwargs: util.filter(s, c)
515 fn = lambda s, c, **kwargs: util.filter(s, c)
516 # Wrap old filters not supporting keyword arguments
516 # Wrap old filters not supporting keyword arguments
517 if not inspect.getargspec(fn)[2]:
517 if not inspect.getargspec(fn)[2]:
518 oldfn = fn
518 oldfn = fn
519 fn = lambda s, c, **kwargs: oldfn(s, c)
519 fn = lambda s, c, **kwargs: oldfn(s, c)
520 l.append((mf, fn, params))
520 l.append((mf, fn, params))
521 self.filterpats[filter] = l
521 self.filterpats[filter] = l
522
522
523 for mf, fn, cmd in self.filterpats[filter]:
523 for mf, fn, cmd in self.filterpats[filter]:
524 if mf(filename):
524 if mf(filename):
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 break
527 break
528
528
529 return data
529 return data
530
530
531 def adddatafilter(self, name, filter):
531 def adddatafilter(self, name, filter):
532 self._datafilters[name] = filter
532 self._datafilters[name] = filter
533
533
534 def wread(self, filename):
534 def wread(self, filename):
535 if self._link(filename):
535 if self._link(filename):
536 data = os.readlink(self.wjoin(filename))
536 data = os.readlink(self.wjoin(filename))
537 else:
537 else:
538 data = self.wopener(filename, 'r').read()
538 data = self.wopener(filename, 'r').read()
539 return self._filter("encode", filename, data)
539 return self._filter("encode", filename, data)
540
540
541 def wwrite(self, filename, data, flags):
541 def wwrite(self, filename, data, flags):
542 data = self._filter("decode", filename, data)
542 data = self._filter("decode", filename, data)
543 try:
543 try:
544 os.unlink(self.wjoin(filename))
544 os.unlink(self.wjoin(filename))
545 except OSError:
545 except OSError:
546 pass
546 pass
547 if 'l' in flags:
547 if 'l' in flags:
548 self.wopener.symlink(data, filename)
548 self.wopener.symlink(data, filename)
549 else:
549 else:
550 self.wopener(filename, 'w').write(data)
550 self.wopener(filename, 'w').write(data)
551 if 'x' in flags:
551 if 'x' in flags:
552 util.set_flags(self.wjoin(filename), False, True)
552 util.set_flags(self.wjoin(filename), False, True)
553
553
554 def wwritedata(self, filename, data):
554 def wwritedata(self, filename, data):
555 return self._filter("decode", filename, data)
555 return self._filter("decode", filename, data)
556
556
557 def transaction(self):
557 def transaction(self):
558 tr = self._transref and self._transref() or None
558 tr = self._transref and self._transref() or None
559 if tr and tr.running():
559 if tr and tr.running():
560 return tr.nest()
560 return tr.nest()
561
561
562 # abort here if the journal already exists
562 # abort here if the journal already exists
563 if os.path.exists(self.sjoin("journal")):
563 if os.path.exists(self.sjoin("journal")):
564 raise error.RepoError(
564 raise error.RepoError(
565 _("abandoned transaction found - run hg recover"))
565 _("abandoned transaction found - run hg recover"))
566
566
567 # save dirstate for rollback
567 # save dirstate for rollback
568 try:
568 try:
569 ds = self.opener("dirstate").read()
569 ds = self.opener("dirstate").read()
570 except IOError:
570 except IOError:
571 ds = ""
571 ds = ""
572 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.dirstate", "w").write(ds)
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574
574
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 (self.join("journal.branch"), self.join("undo.branch"))]
577 (self.join("journal.branch"), self.join("undo.branch"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames),
580 aftertrans(renames),
581 self.store.createmode)
581 self.store.createmode)
582 self._transref = weakref.ref(tr)
582 self._transref = weakref.ref(tr)
583 return tr
583 return tr
584
584
585 def recover(self):
585 def recover(self):
586 lock = self.lock()
586 lock = self.lock()
587 try:
587 try:
588 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
589 self.ui.status(_("rolling back interrupted transaction\n"))
589 self.ui.status(_("rolling back interrupted transaction\n"))
590 transaction.rollback(self.sopener, self.sjoin("journal"),
590 transaction.rollback(self.sopener, self.sjoin("journal"),
591 self.ui.warn)
591 self.ui.warn)
592 self.invalidate()
592 self.invalidate()
593 return True
593 return True
594 else:
594 else:
595 self.ui.warn(_("no interrupted transaction available\n"))
595 self.ui.warn(_("no interrupted transaction available\n"))
596 return False
596 return False
597 finally:
597 finally:
598 lock.release()
598 lock.release()
599
599
600 def rollback(self):
600 def rollback(self):
601 wlock = lock = None
601 wlock = lock = None
602 try:
602 try:
603 wlock = self.wlock()
603 wlock = self.wlock()
604 lock = self.lock()
604 lock = self.lock()
605 if os.path.exists(self.sjoin("undo")):
605 if os.path.exists(self.sjoin("undo")):
606 self.ui.status(_("rolling back last transaction\n"))
606 self.ui.status(_("rolling back last transaction\n"))
607 transaction.rollback(self.sopener, self.sjoin("undo"),
607 transaction.rollback(self.sopener, self.sjoin("undo"),
608 self.ui.warn)
608 self.ui.warn)
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 try:
610 try:
611 branch = self.opener("undo.branch").read()
611 branch = self.opener("undo.branch").read()
612 self.dirstate.setbranch(branch)
612 self.dirstate.setbranch(branch)
613 except IOError:
613 except IOError:
614 self.ui.warn(_("Named branch could not be reset, "
614 self.ui.warn(_("Named branch could not be reset, "
615 "current branch still is: %s\n")
615 "current branch still is: %s\n")
616 % encoding.tolocal(self.dirstate.branch()))
616 % encoding.tolocal(self.dirstate.branch()))
617 self.invalidate()
617 self.invalidate()
618 self.dirstate.invalidate()
618 self.dirstate.invalidate()
619 self.destroyed()
619 self.destroyed()
620 else:
620 else:
621 self.ui.warn(_("no rollback information available\n"))
621 self.ui.warn(_("no rollback information available\n"))
622 finally:
622 finally:
623 release(lock, wlock)
623 release(lock, wlock)
624
624
625 def invalidate(self):
625 def invalidate(self):
626 for a in "changelog manifest".split():
626 for a in "changelog manifest".split():
627 if a in self.__dict__:
627 if a in self.__dict__:
628 delattr(self, a)
628 delattr(self, a)
629 self._tags = None
629 self._tags = None
630 self._tagtypes = None
630 self._tagtypes = None
631 self.nodetagscache = None
631 self.nodetagscache = None
632 self._branchcache = None # in UTF-8
632 self._branchcache = None # in UTF-8
633 self._branchcachetip = None
633 self._branchcachetip = None
634
634
635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 try:
636 try:
637 l = lock.lock(lockname, 0, releasefn, desc=desc)
637 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 except error.LockHeld, inst:
638 except error.LockHeld, inst:
639 if not wait:
639 if not wait:
640 raise
640 raise
641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 (desc, inst.locker))
642 (desc, inst.locker))
643 # default to 600 seconds timeout
643 # default to 600 seconds timeout
644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 releasefn, desc=desc)
645 releasefn, desc=desc)
646 if acquirefn:
646 if acquirefn:
647 acquirefn()
647 acquirefn()
648 return l
648 return l
649
649
650 def lock(self, wait=True):
650 def lock(self, wait=True):
651 '''Lock the repository store (.hg/store) and return a weak reference
651 '''Lock the repository store (.hg/store) and return a weak reference
652 to the lock. Use this before modifying the store (e.g. committing or
652 to the lock. Use this before modifying the store (e.g. committing or
653 stripping). If you are opening a transaction, get a lock as well.)'''
653 stripping). If you are opening a transaction, get a lock as well.)'''
654 l = self._lockref and self._lockref()
654 l = self._lockref and self._lockref()
655 if l is not None and l.held:
655 if l is not None and l.held:
656 l.lock()
656 l.lock()
657 return l
657 return l
658
658
659 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
659 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
660 _('repository %s') % self.origroot)
660 _('repository %s') % self.origroot)
661 self._lockref = weakref.ref(l)
661 self._lockref = weakref.ref(l)
662 return l
662 return l
663
663
664 def wlock(self, wait=True):
664 def wlock(self, wait=True):
665 '''Lock the non-store parts of the repository (everything under
665 '''Lock the non-store parts of the repository (everything under
666 .hg except .hg/store) and return a weak reference to the lock.
666 .hg except .hg/store) and return a weak reference to the lock.
667 Use this before modifying files in .hg.'''
667 Use this before modifying files in .hg.'''
668 l = self._wlockref and self._wlockref()
668 l = self._wlockref and self._wlockref()
669 if l is not None and l.held:
669 if l is not None and l.held:
670 l.lock()
670 l.lock()
671 return l
671 return l
672
672
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
674 self.dirstate.invalidate, _('working directory of %s') %
674 self.dirstate.invalidate, _('working directory of %s') %
675 self.origroot)
675 self.origroot)
676 self._wlockref = weakref.ref(l)
676 self._wlockref = weakref.ref(l)
677 return l
677 return l
678
678
679 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
679 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
680 """
680 """
681 commit an individual file as part of a larger transaction
681 commit an individual file as part of a larger transaction
682 """
682 """
683
683
684 fname = fctx.path()
684 fname = fctx.path()
685 text = fctx.data()
685 text = fctx.data()
686 flog = self.file(fname)
686 flog = self.file(fname)
687 fparent1 = manifest1.get(fname, nullid)
687 fparent1 = manifest1.get(fname, nullid)
688 fparent2 = fparent2o = manifest2.get(fname, nullid)
688 fparent2 = fparent2o = manifest2.get(fname, nullid)
689
689
690 meta = {}
690 meta = {}
691 copy = fctx.renamed()
691 copy = fctx.renamed()
692 if copy and copy[0] != fname:
692 if copy and copy[0] != fname:
693 # Mark the new revision of this file as a copy of another
693 # Mark the new revision of this file as a copy of another
694 # file. This copy data will effectively act as a parent
694 # file. This copy data will effectively act as a parent
695 # of this new revision. If this is a merge, the first
695 # of this new revision. If this is a merge, the first
696 # parent will be the nullid (meaning "look up the copy data")
696 # parent will be the nullid (meaning "look up the copy data")
697 # and the second one will be the other parent. For example:
697 # and the second one will be the other parent. For example:
698 #
698 #
699 # 0 --- 1 --- 3 rev1 changes file foo
699 # 0 --- 1 --- 3 rev1 changes file foo
700 # \ / rev2 renames foo to bar and changes it
700 # \ / rev2 renames foo to bar and changes it
701 # \- 2 -/ rev3 should have bar with all changes and
701 # \- 2 -/ rev3 should have bar with all changes and
702 # should record that bar descends from
702 # should record that bar descends from
703 # bar in rev2 and foo in rev1
703 # bar in rev2 and foo in rev1
704 #
704 #
705 # this allows this merge to succeed:
705 # this allows this merge to succeed:
706 #
706 #
707 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
707 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
708 # \ / merging rev3 and rev4 should use bar@rev2
708 # \ / merging rev3 and rev4 should use bar@rev2
709 # \- 2 --- 4 as the merge base
709 # \- 2 --- 4 as the merge base
710 #
710 #
711
711
712 cfname = copy[0]
712 cfname = copy[0]
713 crev = manifest1.get(cfname)
713 crev = manifest1.get(cfname)
714 newfparent = fparent2
714 newfparent = fparent2
715
715
716 if manifest2: # branch merge
716 if manifest2: # branch merge
717 if fparent2 == nullid or crev is None: # copied on remote side
717 if fparent2 == nullid or crev is None: # copied on remote side
718 if cfname in manifest2:
718 if cfname in manifest2:
719 crev = manifest2[cfname]
719 crev = manifest2[cfname]
720 newfparent = fparent1
720 newfparent = fparent1
721
721
722 # find source in nearest ancestor if we've lost track
722 # find source in nearest ancestor if we've lost track
723 if not crev:
723 if not crev:
724 self.ui.debug(" %s: searching for copy revision for %s\n" %
724 self.ui.debug(" %s: searching for copy revision for %s\n" %
725 (fname, cfname))
725 (fname, cfname))
726 for ancestor in self['.'].ancestors():
726 for ancestor in self['.'].ancestors():
727 if cfname in ancestor:
727 if cfname in ancestor:
728 crev = ancestor[cfname].filenode()
728 crev = ancestor[cfname].filenode()
729 break
729 break
730
730
731 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
731 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
732 meta["copy"] = cfname
732 meta["copy"] = cfname
733 meta["copyrev"] = hex(crev)
733 meta["copyrev"] = hex(crev)
734 fparent1, fparent2 = nullid, newfparent
734 fparent1, fparent2 = nullid, newfparent
735 elif fparent2 != nullid:
735 elif fparent2 != nullid:
736 # is one parent an ancestor of the other?
736 # is one parent an ancestor of the other?
737 fparentancestor = flog.ancestor(fparent1, fparent2)
737 fparentancestor = flog.ancestor(fparent1, fparent2)
738 if fparentancestor == fparent1:
738 if fparentancestor == fparent1:
739 fparent1, fparent2 = fparent2, nullid
739 fparent1, fparent2 = fparent2, nullid
740 elif fparentancestor == fparent2:
740 elif fparentancestor == fparent2:
741 fparent2 = nullid
741 fparent2 = nullid
742
742
743 # is the file changed?
743 # is the file changed?
744 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
744 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
745 changelist.append(fname)
745 changelist.append(fname)
746 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
746 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
747
747
748 # are just the flags changed during merge?
748 # are just the flags changed during merge?
749 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
749 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
750 changelist.append(fname)
750 changelist.append(fname)
751
751
752 return fparent1
752 return fparent1
753
753
754 def commit(self, text="", user=None, date=None, match=None, force=False,
754 def commit(self, text="", user=None, date=None, match=None, force=False,
755 editor=False, extra={}):
755 editor=False, extra={}):
756 """Add a new revision to current repository.
756 """Add a new revision to current repository.
757
757
758 Revision information is gathered from the working directory,
758 Revision information is gathered from the working directory,
759 match can be used to filter the committed files. If editor is
759 match can be used to filter the committed files. If editor is
760 supplied, it is called to get a commit message.
760 supplied, it is called to get a commit message.
761 """
761 """
762
762
763 def fail(f, msg):
763 def fail(f, msg):
764 raise util.Abort('%s: %s' % (f, msg))
764 raise util.Abort('%s: %s' % (f, msg))
765
765
766 if not match:
766 if not match:
767 match = match_.always(self.root, '')
767 match = match_.always(self.root, '')
768
768
769 if not force:
769 if not force:
770 vdirs = []
770 vdirs = []
771 match.dir = vdirs.append
771 match.dir = vdirs.append
772 match.bad = fail
772 match.bad = fail
773
773
774 wlock = self.wlock()
774 wlock = self.wlock()
775 try:
775 try:
776 p1, p2 = self.dirstate.parents()
776 p1, p2 = self.dirstate.parents()
777 wctx = self[None]
777 wctx = self[None]
778
778
779 if (not force and p2 != nullid and match and
779 if (not force and p2 != nullid and match and
780 (match.files() or match.anypats())):
780 (match.files() or match.anypats())):
781 raise util.Abort(_('cannot partially commit a merge '
781 raise util.Abort(_('cannot partially commit a merge '
782 '(do not specify files or patterns)'))
782 '(do not specify files or patterns)'))
783
783
784 changes = self.status(match=match, clean=force)
784 changes = self.status(match=match, clean=force)
785 if force:
785 if force:
786 changes[0].extend(changes[6]) # mq may commit unchanged files
786 changes[0].extend(changes[6]) # mq may commit unchanged files
787
787
788 # check subrepos
788 # check subrepos
789 subs = []
789 subs = []
790 for s in wctx.substate:
790 for s in wctx.substate:
791 if match(s) and wctx.sub(s).dirty():
791 if match(s) and wctx.sub(s).dirty():
792 subs.append(s)
792 subs.append(s)
793 if subs and '.hgsubstate' not in changes[0]:
793 if subs and '.hgsubstate' not in changes[0]:
794 changes[0].insert(0, '.hgsubstate')
794 changes[0].insert(0, '.hgsubstate')
795
795
796 # make sure all explicit patterns are matched
796 # make sure all explicit patterns are matched
797 if not force and match.files():
797 if not force and match.files():
798 matched = set(changes[0] + changes[1] + changes[2])
798 matched = set(changes[0] + changes[1] + changes[2])
799
799
800 for f in match.files():
800 for f in match.files():
801 if f == '.' or f in matched or f in wctx.substate:
801 if f == '.' or f in matched or f in wctx.substate:
802 continue
802 continue
803 if f in changes[3]: # missing
803 if f in changes[3]: # missing
804 fail(f, _('file not found!'))
804 fail(f, _('file not found!'))
805 if f in vdirs: # visited directory
805 if f in vdirs: # visited directory
806 d = f + '/'
806 d = f + '/'
807 for mf in matched:
807 for mf in matched:
808 if mf.startswith(d):
808 if mf.startswith(d):
809 break
809 break
810 else:
810 else:
811 fail(f, _("no match under directory!"))
811 fail(f, _("no match under directory!"))
812 elif f not in self.dirstate:
812 elif f not in self.dirstate:
813 fail(f, _("file not tracked!"))
813 fail(f, _("file not tracked!"))
814
814
815 if (not force and not extra.get("close") and p2 == nullid
815 if (not force and not extra.get("close") and p2 == nullid
816 and not (changes[0] or changes[1] or changes[2])
816 and not (changes[0] or changes[1] or changes[2])
817 and self[None].branch() == self['.'].branch()):
817 and self[None].branch() == self['.'].branch()):
818 return None
818 return None
819
819
820 ms = merge_.mergestate(self)
820 ms = merge_.mergestate(self)
821 for f in changes[0]:
821 for f in changes[0]:
822 if f in ms and ms[f] == 'u':
822 if f in ms and ms[f] == 'u':
823 raise util.Abort(_("unresolved merge conflicts "
823 raise util.Abort(_("unresolved merge conflicts "
824 "(see hg resolve)"))
824 "(see hg resolve)"))
825
825
826 cctx = context.workingctx(self, (p1, p2), text, user, date,
826 cctx = context.workingctx(self, (p1, p2), text, user, date,
827 extra, changes)
827 extra, changes)
828 if editor:
828 if editor:
829 cctx._text = editor(self, cctx, subs)
829 cctx._text = editor(self, cctx, subs)
830 edited = (text != cctx._text)
830 edited = (text != cctx._text)
831
831
832 # commit subs
832 # commit subs
833 if subs:
833 if subs:
834 state = wctx.substate.copy()
834 state = wctx.substate.copy()
835 for s in subs:
835 for s in subs:
836 self.ui.status(_('committing subrepository %s\n') % s)
836 self.ui.status(_('committing subrepository %s\n') % s)
837 sr = wctx.sub(s).commit(cctx._text, user, date)
837 sr = wctx.sub(s).commit(cctx._text, user, date)
838 state[s] = (state[s][0], sr)
838 state[s] = (state[s][0], sr)
839 subrepo.writestate(self, state)
839 subrepo.writestate(self, state)
840
840
841 # Save commit message in case this transaction gets rolled back
841 # Save commit message in case this transaction gets rolled back
842 # (e.g. by a pretxncommit hook). Leave the content alone on
842 # (e.g. by a pretxncommit hook). Leave the content alone on
843 # the assumption that the user will use the same editor again.
843 # the assumption that the user will use the same editor again.
844 msgfile = self.opener('last-message.txt', 'wb')
844 msgfile = self.opener('last-message.txt', 'wb')
845 msgfile.write(cctx._text)
845 msgfile.write(cctx._text)
846 msgfile.close()
846 msgfile.close()
847
847
848 try:
848 try:
849 ret = self.commitctx(cctx, True)
849 ret = self.commitctx(cctx, True)
850 except:
850 except:
851 if edited:
851 if edited:
852 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
852 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
853 self.ui.write(
853 self.ui.write(
854 _('note: commit message saved in %s\n') % msgfn)
854 _('note: commit message saved in %s\n') % msgfn)
855 raise
855 raise
856
856
857 # update dirstate and mergestate
857 # update dirstate and mergestate
858 for f in changes[0] + changes[1]:
858 for f in changes[0] + changes[1]:
859 self.dirstate.normal(f)
859 self.dirstate.normal(f)
860 for f in changes[2]:
860 for f in changes[2]:
861 self.dirstate.forget(f)
861 self.dirstate.forget(f)
862 self.dirstate.setparents(ret)
862 self.dirstate.setparents(ret)
863 ms.reset()
863 ms.reset()
864
864
865 return ret
865 return ret
866
866
867 finally:
867 finally:
868 wlock.release()
868 wlock.release()
869
869
870 def commitctx(self, ctx, error=False):
870 def commitctx(self, ctx, error=False):
871 """Add a new revision to current repository.
871 """Add a new revision to current repository.
872
872
873 Revision information is passed via the context argument.
873 Revision information is passed via the context argument.
874 """
874 """
875
875
876 tr = lock = None
876 tr = lock = None
877 removed = ctx.removed()
877 removed = ctx.removed()
878 p1, p2 = ctx.p1(), ctx.p2()
878 p1, p2 = ctx.p1(), ctx.p2()
879 m1 = p1.manifest().copy()
879 m1 = p1.manifest().copy()
880 m2 = p2.manifest()
880 m2 = p2.manifest()
881 user = ctx.user()
881 user = ctx.user()
882
882
883 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
883 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
884 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
884 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
885
885
886 lock = self.lock()
886 lock = self.lock()
887 try:
887 try:
888 tr = self.transaction()
888 tr = self.transaction()
889 trp = weakref.proxy(tr)
889 trp = weakref.proxy(tr)
890
890
891 # check in files
891 # check in files
892 new = {}
892 new = {}
893 changed = []
893 changed = []
894 linkrev = len(self)
894 linkrev = len(self)
895 for f in sorted(ctx.modified() + ctx.added()):
895 for f in sorted(ctx.modified() + ctx.added()):
896 self.ui.note(f + "\n")
896 self.ui.note(f + "\n")
897 try:
897 try:
898 fctx = ctx[f]
898 fctx = ctx[f]
899 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
899 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
900 changed)
900 changed)
901 m1.set(f, fctx.flags())
901 m1.set(f, fctx.flags())
902 except (OSError, IOError):
902 except (OSError, IOError):
903 if error:
903 if error:
904 self.ui.warn(_("trouble committing %s!\n") % f)
904 self.ui.warn(_("trouble committing %s!\n") % f)
905 raise
905 raise
906 else:
906 else:
907 removed.append(f)
907 removed.append(f)
908
908
909 # update manifest
909 # update manifest
910 m1.update(new)
910 m1.update(new)
911 removed = [f for f in sorted(removed) if f in m1 or f in m2]
911 removed = [f for f in sorted(removed) if f in m1 or f in m2]
912 drop = [f for f in removed if f in m1]
912 drop = [f for f in removed if f in m1]
913 for f in drop:
913 for f in drop:
914 del m1[f]
914 del m1[f]
915 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
915 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
916 p2.manifestnode(), (new, drop))
916 p2.manifestnode(), (new, drop))
917
917
918 # update changelog
918 # update changelog
919 self.changelog.delayupdate()
919 self.changelog.delayupdate()
920 n = self.changelog.add(mn, changed + removed, ctx.description(),
920 n = self.changelog.add(mn, changed + removed, ctx.description(),
921 trp, p1.node(), p2.node(),
921 trp, p1.node(), p2.node(),
922 user, ctx.date(), ctx.extra().copy())
922 user, ctx.date(), ctx.extra().copy())
923 p = lambda: self.changelog.writepending() and self.root or ""
923 p = lambda: self.changelog.writepending() and self.root or ""
924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
925 parent2=xp2, pending=p)
925 parent2=xp2, pending=p)
926 self.changelog.finalize(trp)
926 self.changelog.finalize(trp)
927 tr.close()
927 tr.close()
928
928
929 if self._branchcache:
929 if self._branchcache:
930 self.branchtags()
930 self.branchtags()
931
931
932 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
933 return n
933 return n
934 finally:
934 finally:
935 del tr
935 del tr
936 lock.release()
936 lock.release()
937
937
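commitctx above fires three hooks in a fixed order: precommit before the transaction opens, pretxncommit inside the transaction (with the pending changelog write exposed through the pending callback), and commit after the transaction has closed. A minimal sketch of observing that ordering with in-process Python hooks; the reporthook/installhooks names are hypothetical, and the keyword names mirror the hook() calls above:

    def reporthook(ui, repo, hooktype, **kwargs):
        # node, parent1 and parent2 arrive as keyword arguments,
        # exactly as they are passed to self.hook() in commitctx
        ui.write("%s: node=%s parent1=%s\n"
                 % (hooktype, kwargs.get('node'), kwargs.get('parent1')))

    def installhooks(repo):
        # hypothetical helper: register the same callable for all three hooks
        for name in ('precommit', 'pretxncommit', 'commit'):
            repo.ui.setconfig('hooks', name + '.report', reporthook)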
938 def destroyed(self):
938 def destroyed(self):
939 '''Inform the repository that nodes have been destroyed.
939 '''Inform the repository that nodes have been destroyed.
940 Intended for use by strip and rollback, so there's a common
940 Intended for use by strip and rollback, so there's a common
941 place for anything that has to be done after destroying history.'''
941 place for anything that has to be done after destroying history.'''
942 # XXX it might be nice if we could take the list of destroyed
942 # XXX it might be nice if we could take the list of destroyed
943 # nodes, but I don't see an easy way for rollback() to do that
943 # nodes, but I don't see an easy way for rollback() to do that
944
944
945 # Ensure the persistent tag cache is updated. Doing it now
945 # Ensure the persistent tag cache is updated. Doing it now
946 # means that the tag cache only has to worry about destroyed
946 # means that the tag cache only has to worry about destroyed
947 # heads immediately after a strip/rollback. That in turn
947 # heads immediately after a strip/rollback. That in turn
948 # guarantees that "cachetip == currenttip" (comparing both rev
948 # guarantees that "cachetip == currenttip" (comparing both rev
949 # and node) always means no nodes have been added or destroyed.
949 # and node) always means no nodes have been added or destroyed.
950
950
951 # XXX this is suboptimal when qrefresh'ing: we strip the current
951 # XXX this is suboptimal when qrefresh'ing: we strip the current
952 # head, refresh the tag cache, then immediately add a new head.
952 # head, refresh the tag cache, then immediately add a new head.
953 # But I think doing it this way is necessary for the "instant
953 # But I think doing it this way is necessary for the "instant
954 # tag cache retrieval" case to work.
954 # tag cache retrieval" case to work.
955 tags_.findglobaltags(self.ui, self, {}, {})
955 tags_.findglobaltags(self.ui, self, {}, {})
956
956
957 def walk(self, match, node=None):
957 def walk(self, match, node=None):
958 '''
958 '''
959 walk recursively through the directory tree or a given
959 walk recursively through the directory tree or a given
960 changeset, finding all files matched by the match
960 changeset, finding all files matched by the match
961 function
961 function
962 '''
962 '''
963 return self[node].walk(match)
963 return self[node].walk(match)
964
964
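A small usage sketch for walk(), assuming repo is a localrepository instance; match_.always is the same fallback matcher that status() uses below:

    m = match_.always(repo.root, repo.getcwd())
    for f in repo.walk(m, node='tip'):   # files tracked in the tip changeset
        repo.ui.write("tip:     %s\n" % f)
    for f in repo.walk(m, node=None):    # files in the working directory
        repo.ui.write("working: %s\n" % f)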
965 def status(self, node1='.', node2=None, match=None,
965 def status(self, node1='.', node2=None, match=None,
966 ignored=False, clean=False, unknown=False):
966 ignored=False, clean=False, unknown=False):
967 """return status of files between two nodes or node and working directory
967 """return status of files between two nodes or node and working directory
968
968
969 If node1 is None, use the first dirstate parent instead.
969 If node1 is None, use the first dirstate parent instead.
970 If node2 is None, compare node1 with working directory.
970 If node2 is None, compare node1 with working directory.
971 """
971 """
972
972
973 def mfmatches(ctx):
973 def mfmatches(ctx):
974 mf = ctx.manifest().copy()
974 mf = ctx.manifest().copy()
975 for fn in mf.keys():
975 for fn in mf.keys():
976 if not match(fn):
976 if not match(fn):
977 del mf[fn]
977 del mf[fn]
978 return mf
978 return mf
979
979
980 if isinstance(node1, context.changectx):
980 if isinstance(node1, context.changectx):
981 ctx1 = node1
981 ctx1 = node1
982 else:
982 else:
983 ctx1 = self[node1]
983 ctx1 = self[node1]
984 if isinstance(node2, context.changectx):
984 if isinstance(node2, context.changectx):
985 ctx2 = node2
985 ctx2 = node2
986 else:
986 else:
987 ctx2 = self[node2]
987 ctx2 = self[node2]
988
988
989 working = ctx2.rev() is None
989 working = ctx2.rev() is None
990 parentworking = working and ctx1 == self['.']
990 parentworking = working and ctx1 == self['.']
991 match = match or match_.always(self.root, self.getcwd())
991 match = match or match_.always(self.root, self.getcwd())
992 listignored, listclean, listunknown = ignored, clean, unknown
992 listignored, listclean, listunknown = ignored, clean, unknown
993
993
994 # load earliest manifest first for caching reasons
994 # load earliest manifest first for caching reasons
995 if not working and ctx2.rev() < ctx1.rev():
995 if not working and ctx2.rev() < ctx1.rev():
996 ctx2.manifest()
996 ctx2.manifest()
997
997
998 if not parentworking:
998 if not parentworking:
999 def bad(f, msg):
999 def bad(f, msg):
1000 if f not in ctx1:
1000 if f not in ctx1:
1001 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1001 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1002 match.bad = bad
1002 match.bad = bad
1003
1003
1004 if working: # we need to scan the working dir
1004 if working: # we need to scan the working dir
1005 subrepos = ctx1.substate.keys()
1005 subrepos = ctx1.substate.keys()
1006 s = self.dirstate.status(match, subrepos, listignored,
1006 s = self.dirstate.status(match, subrepos, listignored,
1007 listclean, listunknown)
1007 listclean, listunknown)
1008 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1008 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1009
1009
1010 # check for any possibly clean files
1010 # check for any possibly clean files
1011 if parentworking and cmp:
1011 if parentworking and cmp:
1012 fixup = []
1012 fixup = []
1013 # do a full compare of any files that might have changed
1013 # do a full compare of any files that might have changed
1014 for f in sorted(cmp):
1014 for f in sorted(cmp):
1015 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1015 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1016 or ctx1[f].cmp(ctx2[f].data())):
1016 or ctx1[f].cmp(ctx2[f].data())):
1017 modified.append(f)
1017 modified.append(f)
1018 else:
1018 else:
1019 fixup.append(f)
1019 fixup.append(f)
1020
1020
1021 if listclean:
1021 if listclean:
1022 clean += fixup
1022 clean += fixup
1023
1023
1024 # update dirstate for files that are actually clean
1024 # update dirstate for files that are actually clean
1025 if fixup:
1025 if fixup:
1026 try:
1026 try:
1027 # updating the dirstate is optional
1027 # updating the dirstate is optional
1028 # so we don't wait on the lock
1028 # so we don't wait on the lock
1029 wlock = self.wlock(False)
1029 wlock = self.wlock(False)
1030 try:
1030 try:
1031 for f in fixup:
1031 for f in fixup:
1032 self.dirstate.normal(f)
1032 self.dirstate.normal(f)
1033 finally:
1033 finally:
1034 wlock.release()
1034 wlock.release()
1035 except error.LockError:
1035 except error.LockError:
1036 pass
1036 pass
1037
1037
1038 if not parentworking:
1038 if not parentworking:
1039 mf1 = mfmatches(ctx1)
1039 mf1 = mfmatches(ctx1)
1040 if working:
1040 if working:
1041 # we are comparing working dir against non-parent
1041 # we are comparing working dir against non-parent
1042 # generate a pseudo-manifest for the working dir
1042 # generate a pseudo-manifest for the working dir
1043 mf2 = mfmatches(self['.'])
1043 mf2 = mfmatches(self['.'])
1044 for f in cmp + modified + added:
1044 for f in cmp + modified + added:
1045 mf2[f] = None
1045 mf2[f] = None
1046 mf2.set(f, ctx2.flags(f))
1046 mf2.set(f, ctx2.flags(f))
1047 for f in removed:
1047 for f in removed:
1048 if f in mf2:
1048 if f in mf2:
1049 del mf2[f]
1049 del mf2[f]
1050 else:
1050 else:
1051 # we are comparing two revisions
1051 # we are comparing two revisions
1052 deleted, unknown, ignored = [], [], []
1052 deleted, unknown, ignored = [], [], []
1053 mf2 = mfmatches(ctx2)
1053 mf2 = mfmatches(ctx2)
1054
1054
1055 modified, added, clean = [], [], []
1055 modified, added, clean = [], [], []
1056 for fn in mf2:
1056 for fn in mf2:
1057 if fn in mf1:
1057 if fn in mf1:
1058 if (mf1.flags(fn) != mf2.flags(fn) or
1058 if (mf1.flags(fn) != mf2.flags(fn) or
1059 (mf1[fn] != mf2[fn] and
1059 (mf1[fn] != mf2[fn] and
1060 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1060 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1061 modified.append(fn)
1061 modified.append(fn)
1062 elif listclean:
1062 elif listclean:
1063 clean.append(fn)
1063 clean.append(fn)
1064 del mf1[fn]
1064 del mf1[fn]
1065 else:
1065 else:
1066 added.append(fn)
1066 added.append(fn)
1067 removed = mf1.keys()
1067 removed = mf1.keys()
1068
1068
1069 r = modified, added, removed, deleted, unknown, ignored, clean
1069 r = modified, added, removed, deleted, unknown, ignored, clean
1070 [l.sort() for l in r]
1070 [l.sort() for l in r]
1071 return r
1071 return r
1072
1072
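status() always returns the seven lists in the order sorted above; the unknown, ignored and clean lists are only populated when the corresponding flags are passed. A minimal sketch, assuming repo is a localrepository:

    st = repo.status(node1='.', node2=None, unknown=True, clean=True)
    modified, added, removed, deleted, unknown, ignored, clean = st
    for code, files in zip('MAR!?IC', st):   # the usual hg status letters
        for f in files:
            repo.ui.write("%s %s\n" % (code, f))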
1073 def add(self, list):
1073 def add(self, list):
1074 wlock = self.wlock()
1074 wlock = self.wlock()
1075 try:
1075 try:
1076 rejected = []
1076 rejected = []
1077 for f in list:
1077 for f in list:
1078 p = self.wjoin(f)
1078 p = self.wjoin(f)
1079 try:
1079 try:
1080 st = os.lstat(p)
1080 st = os.lstat(p)
1081 except:
1081 except:
1082 self.ui.warn(_("%s does not exist!\n") % f)
1082 self.ui.warn(_("%s does not exist!\n") % f)
1083 rejected.append(f)
1083 rejected.append(f)
1084 continue
1084 continue
1085 if st.st_size > 10000000:
1085 if st.st_size > 10000000:
1086 self.ui.warn(_("%s: files over 10MB may cause memory and"
1086 self.ui.warn(_("%s: files over 10MB may cause memory and"
1087 " performance problems\n"
1087 " performance problems\n"
1088 "(use 'hg revert %s' to unadd the file)\n")
1088 "(use 'hg revert %s' to unadd the file)\n")
1089 % (f, f))
1089 % (f, f))
1090 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1090 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1091 self.ui.warn(_("%s not added: only files and symlinks "
1091 self.ui.warn(_("%s not added: only files and symlinks "
1092 "supported currently\n") % f)
1092 "supported currently\n") % f)
1093 rejected.append(p)
1093 rejected.append(p)
1094 elif self.dirstate[f] in 'amn':
1094 elif self.dirstate[f] in 'amn':
1095 self.ui.warn(_("%s already tracked!\n") % f)
1095 self.ui.warn(_("%s already tracked!\n") % f)
1096 elif self.dirstate[f] == 'r':
1096 elif self.dirstate[f] == 'r':
1097 self.dirstate.normallookup(f)
1097 self.dirstate.normallookup(f)
1098 else:
1098 else:
1099 self.dirstate.add(f)
1099 self.dirstate.add(f)
1100 return rejected
1100 return rejected
1101 finally:
1101 finally:
1102 wlock.release()
1102 wlock.release()
1103
1103
1104 def forget(self, list):
1104 def forget(self, list):
1105 wlock = self.wlock()
1105 wlock = self.wlock()
1106 try:
1106 try:
1107 for f in list:
1107 for f in list:
1108 if self.dirstate[f] != 'a':
1108 if self.dirstate[f] != 'a':
1109 self.ui.warn(_("%s not added!\n") % f)
1109 self.ui.warn(_("%s not added!\n") % f)
1110 else:
1110 else:
1111 self.dirstate.forget(f)
1111 self.dirstate.forget(f)
1112 finally:
1112 finally:
1113 wlock.release()
1113 wlock.release()
1114
1114
1115 def remove(self, list, unlink=False):
1115 def remove(self, list, unlink=False):
1116 if unlink:
1116 if unlink:
1117 for f in list:
1117 for f in list:
1118 try:
1118 try:
1119 util.unlink(self.wjoin(f))
1119 util.unlink(self.wjoin(f))
1120 except OSError, inst:
1120 except OSError, inst:
1121 if inst.errno != errno.ENOENT:
1121 if inst.errno != errno.ENOENT:
1122 raise
1122 raise
1123 wlock = self.wlock()
1123 wlock = self.wlock()
1124 try:
1124 try:
1125 for f in list:
1125 for f in list:
1126 if unlink and os.path.exists(self.wjoin(f)):
1126 if unlink and os.path.exists(self.wjoin(f)):
1127 self.ui.warn(_("%s still exists!\n") % f)
1127 self.ui.warn(_("%s still exists!\n") % f)
1128 elif self.dirstate[f] == 'a':
1128 elif self.dirstate[f] == 'a':
1129 self.dirstate.forget(f)
1129 self.dirstate.forget(f)
1130 elif f not in self.dirstate:
1130 elif f not in self.dirstate:
1131 self.ui.warn(_("%s not tracked!\n") % f)
1131 self.ui.warn(_("%s not tracked!\n") % f)
1132 else:
1132 else:
1133 self.dirstate.remove(f)
1133 self.dirstate.remove(f)
1134 finally:
1134 finally:
1135 wlock.release()
1135 wlock.release()
1136
1136
1137 def undelete(self, list):
1137 def undelete(self, list):
1138 manifests = [self.manifest.read(self.changelog.read(p)[0])
1138 manifests = [self.manifest.read(self.changelog.read(p)[0])
1139 for p in self.dirstate.parents() if p != nullid]
1139 for p in self.dirstate.parents() if p != nullid]
1140 wlock = self.wlock()
1140 wlock = self.wlock()
1141 try:
1141 try:
1142 for f in list:
1142 for f in list:
1143 if self.dirstate[f] != 'r':
1143 if self.dirstate[f] != 'r':
1144 self.ui.warn(_("%s not removed!\n") % f)
1144 self.ui.warn(_("%s not removed!\n") % f)
1145 else:
1145 else:
1146 m = f in manifests[0] and manifests[0] or manifests[1]
1146 m = f in manifests[0] and manifests[0] or manifests[1]
1147 t = self.file(f).read(m[f])
1147 t = self.file(f).read(m[f])
1148 self.wwrite(f, t, m.flags(f))
1148 self.wwrite(f, t, m.flags(f))
1149 self.dirstate.normal(f)
1149 self.dirstate.normal(f)
1150 finally:
1150 finally:
1151 wlock.release()
1151 wlock.release()
1152
1152
1153 def copy(self, source, dest):
1153 def copy(self, source, dest):
1154 p = self.wjoin(dest)
1154 p = self.wjoin(dest)
1155 if not (os.path.exists(p) or os.path.islink(p)):
1155 if not (os.path.exists(p) or os.path.islink(p)):
1156 self.ui.warn(_("%s does not exist!\n") % dest)
1156 self.ui.warn(_("%s does not exist!\n") % dest)
1157 elif not (os.path.isfile(p) or os.path.islink(p)):
1157 elif not (os.path.isfile(p) or os.path.islink(p)):
1158 self.ui.warn(_("copy failed: %s is not a file or a "
1158 self.ui.warn(_("copy failed: %s is not a file or a "
1159 "symbolic link\n") % dest)
1159 "symbolic link\n") % dest)
1160 else:
1160 else:
1161 wlock = self.wlock()
1161 wlock = self.wlock()
1162 try:
1162 try:
1163 if self.dirstate[dest] in '?r':
1163 if self.dirstate[dest] in '?r':
1164 self.dirstate.add(dest)
1164 self.dirstate.add(dest)
1165 self.dirstate.copy(source, dest)
1165 self.dirstate.copy(source, dest)
1166 finally:
1166 finally:
1167 wlock.release()
1167 wlock.release()
1168
1168
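The dirstate states tested throughout add/forget/remove/copy above are single letters: 'n' normal (tracked), 'a' added, 'r' removed, 'm' merged, and '?' untracked. A tiny sketch of inspecting them, assuming repo is a localrepository and the file names are hypothetical:

    for f in ('tracked.txt', 'brand-new.txt'):
        state = repo.dirstate[f]       # one of 'n', 'a', 'r', 'm', '?'
        repo.ui.write("%s %s\n" % (state, f))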
1169 def heads(self, start=None):
1169 def heads(self, start=None):
1170 heads = self.changelog.heads(start)
1170 heads = self.changelog.heads(start)
1171 # sort the output in rev descending order
1171 # sort the output in rev descending order
1172 heads = [(-self.changelog.rev(h), h) for h in heads]
1172 heads = [(-self.changelog.rev(h), h) for h in heads]
1173 return [n for (r, n) in sorted(heads)]
1173 return [n for (r, n) in sorted(heads)]
1174
1174
1175 def branchheads(self, branch=None, start=None, closed=False):
1175 def branchheads(self, branch=None, start=None, closed=False):
1176 '''return a (possibly filtered) list of heads for the given branch
1176 '''return a (possibly filtered) list of heads for the given branch
1177
1177
1178 Heads are returned in topological order, from newest to oldest.
1178 Heads are returned in topological order, from newest to oldest.
1179 If branch is None, use the dirstate branch.
1179 If branch is None, use the dirstate branch.
1180 If start is not None, return only heads reachable from start.
1180 If start is not None, return only heads reachable from start.
1181 If closed is True, return heads that are marked as closed as well.
1181 If closed is True, return heads that are marked as closed as well.
1182 '''
1182 '''
1183 if branch is None:
1183 if branch is None:
1184 branch = self[None].branch()
1184 branch = self[None].branch()
1185 branches = self.branchmap()
1185 branches = self.branchmap()
1186 if branch not in branches:
1186 if branch not in branches:
1187 return []
1187 return []
1188 # the cache returns heads ordered lowest to highest
1188 # the cache returns heads ordered lowest to highest
1189 bheads = list(reversed(branches[branch]))
1189 bheads = list(reversed(branches[branch]))
1190 if start is not None:
1190 if start is not None:
1191 # filter out the heads that cannot be reached from startrev
1191 # filter out the heads that cannot be reached from startrev
1192 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1192 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1193 bheads = [h for h in bheads if h in fbheads]
1193 bheads = [h for h in bheads if h in fbheads]
1194 if not closed:
1194 if not closed:
1195 bheads = [h for h in bheads if
1195 bheads = [h for h in bheads if
1196 ('close' not in self.changelog.read(h)[5])]
1196 ('close' not in self.changelog.read(h)[5])]
1197 return bheads
1197 return bheads
1198
1198
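A usage sketch tying branchmap() and branchheads() together, assuming repo is a localrepository:

    from mercurial.node import short

    for name in sorted(repo.branchmap()):
        heads = repo.branchheads(name, closed=True)
        repo.ui.write("%s: %s\n"
                      % (name, " ".join(short(h) for h in heads)))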
1199 def branches(self, nodes):
1199 def branches(self, nodes):
1200 if not nodes:
1200 if not nodes:
1201 nodes = [self.changelog.tip()]
1201 nodes = [self.changelog.tip()]
1202 b = []
1202 b = []
1203 for n in nodes:
1203 for n in nodes:
1204 t = n
1204 t = n
1205 while 1:
1205 while 1:
1206 p = self.changelog.parents(n)
1206 p = self.changelog.parents(n)
1207 if p[1] != nullid or p[0] == nullid:
1207 if p[1] != nullid or p[0] == nullid:
1208 b.append((t, n, p[0], p[1]))
1208 b.append((t, n, p[0], p[1]))
1209 break
1209 break
1210 n = p[0]
1210 n = p[0]
1211 return b
1211 return b
1212
1212
1213 def between(self, pairs):
1213 def between(self, pairs):
1214 r = []
1214 r = []
1215
1215
1216 for top, bottom in pairs:
1216 for top, bottom in pairs:
1217 n, l, i = top, [], 0
1217 n, l, i = top, [], 0
1218 f = 1
1218 f = 1
1219
1219
1220 while n != bottom and n != nullid:
1220 while n != bottom and n != nullid:
1221 p = self.changelog.parents(n)[0]
1221 p = self.changelog.parents(n)[0]
1222 if i == f:
1222 if i == f:
1223 l.append(n)
1223 l.append(n)
1224 f = f * 2
1224 f = f * 2
1225 n = p
1225 n = p
1226 i += 1
1226 i += 1
1227
1227
1228 r.append(l)
1228 r.append(l)
1229
1229
1230 return r
1230 return r
1231
1231
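between() walks the first-parent chain down from top and keeps the nodes that lie 1, 2, 4, 8, ... steps below it, stopping at bottom; this exponential sample is what the binary search in findcommonincoming below narrows. A self-contained sketch of the same sampling over plain integers (purely illustrative, not part of this class):

    def sample(top, bottom):
        n, out, i, f = top, [], 0, 1
        while n != bottom and n > 0:
            if i == f:
                out.append(n)
                f *= 2
            n -= 1        # stand-in for "follow the first parent"
            i += 1
        return out

    # sample(20, 0) == [19, 18, 16, 12, 4]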
1232 def findincoming(self, remote, base=None, heads=None, force=False):
1232 def findincoming(self, remote, base=None, heads=None, force=False):
1233 """Return list of roots of the subsets of missing nodes from remote
1233 """Return list of roots of the subsets of missing nodes from remote
1234
1234
1235 If base dict is specified, assume that these nodes and their parents
1235 If base dict is specified, assume that these nodes and their parents
1236 exist on the remote side and that no child of a node of base exists
1236 exist on the remote side and that no child of a node of base exists
1237 in both remote and self.
1237 in both remote and self.
1238 Furthermore, base will be updated to include the nodes that exist
1238 Furthermore, base will be updated to include the nodes that exist
1239 in both self and remote but none of whose children exist in both.
1239 in both self and remote but none of whose children exist in both.
1240 If a list of heads is specified, return only nodes which are heads
1240 If a list of heads is specified, return only nodes which are heads
1241 or ancestors of these heads.
1241 or ancestors of these heads.
1242
1242
1243 All the ancestors of base are in self and in remote.
1243 All the ancestors of base are in self and in remote.
1244 All the descendants of the list returned are missing in self.
1244 All the descendants of the list returned are missing in self.
1245 (and so we know that the rest of the nodes are missing in remote, see
1245 (and so we know that the rest of the nodes are missing in remote, see
1246 outgoing)
1246 outgoing)
1247 """
1247 """
1248 return self.findcommonincoming(remote, base, heads, force)[1]
1248 return self.findcommonincoming(remote, base, heads, force)[1]
1249
1249
1250 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1250 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1251 """Return a tuple (common, missing roots, heads) used to identify
1251 """Return a tuple (common, missing roots, heads) used to identify
1252 missing nodes from remote.
1252 missing nodes from remote.
1253
1253
1254 If base dict is specified, assume that these nodes and their parents
1254 If base dict is specified, assume that these nodes and their parents
1255 exist on the remote side and that no child of a node of base exists
1255 exist on the remote side and that no child of a node of base exists
1256 in both remote and self.
1256 in both remote and self.
1257 Furthermore, base will be updated to include the nodes that exist
1257 Furthermore, base will be updated to include the nodes that exist
1258 in both self and remote but none of whose children exist in both.
1258 in both self and remote but none of whose children exist in both.
1259 If a list of heads is specified, return only nodes which are heads
1259 If a list of heads is specified, return only nodes which are heads
1260 or ancestors of these heads.
1260 or ancestors of these heads.
1261
1261
1262 All the ancestors of base are in self and in remote.
1262 All the ancestors of base are in self and in remote.
1263 """
1263 """
1264 m = self.changelog.nodemap
1264 m = self.changelog.nodemap
1265 search = []
1265 search = []
1266 fetch = set()
1266 fetch = set()
1267 seen = set()
1267 seen = set()
1268 seenbranch = set()
1268 seenbranch = set()
1269 if base is None:
1269 if base is None:
1270 base = {}
1270 base = {}
1271
1271
1272 if not heads:
1272 if not heads:
1273 heads = remote.heads()
1273 heads = remote.heads()
1274
1274
1275 if self.changelog.tip() == nullid:
1275 if self.changelog.tip() == nullid:
1276 base[nullid] = 1
1276 base[nullid] = 1
1277 if heads != [nullid]:
1277 if heads != [nullid]:
1278 return [nullid], [nullid], list(heads)
1278 return [nullid], [nullid], list(heads)
1279 return [nullid], [], []
1279 return [nullid], [], []
1280
1280
1281 # assume we're closer to the tip than the root
1281 # assume we're closer to the tip than the root
1282 # and start by examining the heads
1282 # and start by examining the heads
1283 self.ui.status(_("searching for changes\n"))
1283 self.ui.status(_("searching for changes\n"))
1284
1284
1285 unknown = []
1285 unknown = []
1286 for h in heads:
1286 for h in heads:
1287 if h not in m:
1287 if h not in m:
1288 unknown.append(h)
1288 unknown.append(h)
1289 else:
1289 else:
1290 base[h] = 1
1290 base[h] = 1
1291
1291
1292 heads = unknown
1292 heads = unknown
1293 if not unknown:
1293 if not unknown:
1294 return base.keys(), [], []
1294 return base.keys(), [], []
1295
1295
1296 req = set(unknown)
1296 req = set(unknown)
1297 reqcnt = 0
1297 reqcnt = 0
1298
1298
1299 # search through remote branches
1299 # search through remote branches
1300 # a 'branch' here is a linear segment of history, with four parts:
1300 # a 'branch' here is a linear segment of history, with four parts:
1301 # head, root, first parent, second parent
1301 # head, root, first parent, second parent
1302 # (a branch always has two parents (or none) by definition)
1302 # (a branch always has two parents (or none) by definition)
1303 unknown = remote.branches(unknown)
1303 unknown = remote.branches(unknown)
1304 while unknown:
1304 while unknown:
1305 r = []
1305 r = []
1306 while unknown:
1306 while unknown:
1307 n = unknown.pop(0)
1307 n = unknown.pop(0)
1308 if n[0] in seen:
1308 if n[0] in seen:
1309 continue
1309 continue
1310
1310
1311 self.ui.debug("examining %s:%s\n"
1311 self.ui.debug("examining %s:%s\n"
1312 % (short(n[0]), short(n[1])))
1312 % (short(n[0]), short(n[1])))
1313 if n[0] == nullid: # found the end of the branch
1313 if n[0] == nullid: # found the end of the branch
1314 pass
1314 pass
1315 elif n in seenbranch:
1315 elif n in seenbranch:
1316 self.ui.debug("branch already found\n")
1316 self.ui.debug("branch already found\n")
1317 continue
1317 continue
1318 elif n[1] and n[1] in m: # do we know the base?
1318 elif n[1] and n[1] in m: # do we know the base?
1319 self.ui.debug("found incomplete branch %s:%s\n"
1319 self.ui.debug("found incomplete branch %s:%s\n"
1320 % (short(n[0]), short(n[1])))
1320 % (short(n[0]), short(n[1])))
1321 search.append(n[0:2]) # schedule branch range for scanning
1321 search.append(n[0:2]) # schedule branch range for scanning
1322 seenbranch.add(n)
1322 seenbranch.add(n)
1323 else:
1323 else:
1324 if n[1] not in seen and n[1] not in fetch:
1324 if n[1] not in seen and n[1] not in fetch:
1325 if n[2] in m and n[3] in m:
1325 if n[2] in m and n[3] in m:
1326 self.ui.debug("found new changeset %s\n" %
1326 self.ui.debug("found new changeset %s\n" %
1327 short(n[1]))
1327 short(n[1]))
1328 fetch.add(n[1]) # earliest unknown
1328 fetch.add(n[1]) # earliest unknown
1329 for p in n[2:4]:
1329 for p in n[2:4]:
1330 if p in m:
1330 if p in m:
1331 base[p] = 1 # latest known
1331 base[p] = 1 # latest known
1332
1332
1333 for p in n[2:4]:
1333 for p in n[2:4]:
1334 if p not in req and p not in m:
1334 if p not in req and p not in m:
1335 r.append(p)
1335 r.append(p)
1336 req.add(p)
1336 req.add(p)
1337 seen.add(n[0])
1337 seen.add(n[0])
1338
1338
1339 if r:
1339 if r:
1340 reqcnt += 1
1340 reqcnt += 1
1341 self.ui.debug("request %d: %s\n" %
1341 self.ui.debug("request %d: %s\n" %
1342 (reqcnt, " ".join(map(short, r))))
1342 (reqcnt, " ".join(map(short, r))))
1343 for p in xrange(0, len(r), 10):
1343 for p in xrange(0, len(r), 10):
1344 for b in remote.branches(r[p:p + 10]):
1344 for b in remote.branches(r[p:p + 10]):
1345 self.ui.debug("received %s:%s\n" %
1345 self.ui.debug("received %s:%s\n" %
1346 (short(b[0]), short(b[1])))
1346 (short(b[0]), short(b[1])))
1347 unknown.append(b)
1347 unknown.append(b)
1348
1348
1349 # do binary search on the branches we found
1349 # do binary search on the branches we found
1350 while search:
1350 while search:
1351 newsearch = []
1351 newsearch = []
1352 reqcnt += 1
1352 reqcnt += 1
1353 for n, l in zip(search, remote.between(search)):
1353 for n, l in zip(search, remote.between(search)):
1354 l.append(n[1])
1354 l.append(n[1])
1355 p = n[0]
1355 p = n[0]
1356 f = 1
1356 f = 1
1357 for i in l:
1357 for i in l:
1358 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1358 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1359 if i in m:
1359 if i in m:
1360 if f <= 2:
1360 if f <= 2:
1361 self.ui.debug("found new branch changeset %s\n" %
1361 self.ui.debug("found new branch changeset %s\n" %
1362 short(p))
1362 short(p))
1363 fetch.add(p)
1363 fetch.add(p)
1364 base[i] = 1
1364 base[i] = 1
1365 else:
1365 else:
1366 self.ui.debug("narrowed branch search to %s:%s\n"
1366 self.ui.debug("narrowed branch search to %s:%s\n"
1367 % (short(p), short(i)))
1367 % (short(p), short(i)))
1368 newsearch.append((p, i))
1368 newsearch.append((p, i))
1369 break
1369 break
1370 p, f = i, f * 2
1370 p, f = i, f * 2
1371 search = newsearch
1371 search = newsearch
1372
1372
1373 # sanity check our fetch list
1373 # sanity check our fetch list
1374 for f in fetch:
1374 for f in fetch:
1375 if f in m:
1375 if f in m:
1376 raise error.RepoError(_("already have changeset ")
1376 raise error.RepoError(_("already have changeset ")
1377 + short(f[:4]))
1377 + short(f[:4]))
1378
1378
1379 if base.keys() == [nullid]:
1379 if base.keys() == [nullid]:
1380 if force:
1380 if force:
1381 self.ui.warn(_("warning: repository is unrelated\n"))
1381 self.ui.warn(_("warning: repository is unrelated\n"))
1382 else:
1382 else:
1383 raise util.Abort(_("repository is unrelated"))
1383 raise util.Abort(_("repository is unrelated"))
1384
1384
1385 self.ui.debug("found new changesets starting at " +
1385 self.ui.debug("found new changesets starting at " +
1386 " ".join([short(f) for f in fetch]) + "\n")
1386 " ".join([short(f) for f in fetch]) + "\n")
1387
1387
1388 self.ui.debug("%d total queries\n" % reqcnt)
1388 self.ui.debug("%d total queries\n" % reqcnt)
1389
1389
1390 return base.keys(), list(fetch), heads
1390 return base.keys(), list(fetch), heads
1391
1391
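A minimal driver sketch for the discovery code above, assuming other is a remote repository handle such as one returned by hg.repository(ui, url); the URL is hypothetical:

    from mercurial import hg
    from mercurial.node import short

    other = hg.repository(repo.ui, 'http://example.com/repo')
    common, fetchroots, remoteheads = repo.findcommonincoming(other)
    if not fetchroots:
        repo.ui.write("nothing incoming\n")
    else:
        repo.ui.write("missing roots: %s\n"
                      % " ".join(short(n) for n in fetchroots))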
1392 def findoutgoing(self, remote, base=None, heads=None, force=False):
1392 def findoutgoing(self, remote, base=None, heads=None, force=False):
1393 """Return list of nodes that are roots of subsets not in remote
1393 """Return list of nodes that are roots of subsets not in remote
1394
1394
1395 If base dict is specified, assume that these nodes and their parents
1395 If base dict is specified, assume that these nodes and their parents
1396 exist on the remote side.
1396 exist on the remote side.
1397 If a list of heads is specified, return only nodes which are heads
1397 If a list of heads is specified, return only nodes which are heads
1398 or ancestors of these heads, and return a second element which
1398 or ancestors of these heads, and return a second element which
1399 contains all remote heads which get new children.
1399 contains all remote heads which get new children.
1400 """
1400 """
1401 if base is None:
1401 if base is None:
1402 base = {}
1402 base = {}
1403 self.findincoming(remote, base, heads, force=force)
1403 self.findincoming(remote, base, heads, force=force)
1404
1404
1405 self.ui.debug("common changesets up to "
1405 self.ui.debug("common changesets up to "
1406 + " ".join(map(short, base.keys())) + "\n")
1406 + " ".join(map(short, base.keys())) + "\n")
1407
1407
1408 remain = set(self.changelog.nodemap)
1408 remain = set(self.changelog.nodemap)
1409
1409
1410 # prune everything remote has from the tree
1410 # prune everything remote has from the tree
1411 remain.remove(nullid)
1411 remain.remove(nullid)
1412 remove = base.keys()
1412 remove = base.keys()
1413 while remove:
1413 while remove:
1414 n = remove.pop(0)
1414 n = remove.pop(0)
1415 if n in remain:
1415 if n in remain:
1416 remain.remove(n)
1416 remain.remove(n)
1417 for p in self.changelog.parents(n):
1417 for p in self.changelog.parents(n):
1418 remove.append(p)
1418 remove.append(p)
1419
1419
1420 # find every node whose parents have been pruned
1420 # find every node whose parents have been pruned
1421 subset = []
1421 subset = []
1422 # find every remote head that will get new children
1422 # find every remote head that will get new children
1423 updated_heads = set()
1423 updated_heads = set()
1424 for n in remain:
1424 for n in remain:
1425 p1, p2 = self.changelog.parents(n)
1425 p1, p2 = self.changelog.parents(n)
1426 if p1 not in remain and p2 not in remain:
1426 if p1 not in remain and p2 not in remain:
1427 subset.append(n)
1427 subset.append(n)
1428 if heads:
1428 if heads:
1429 if p1 in heads:
1429 if p1 in heads:
1430 updated_heads.add(p1)
1430 updated_heads.add(p1)
1431 if p2 in heads:
1431 if p2 in heads:
1432 updated_heads.add(p2)
1432 updated_heads.add(p2)
1433
1433
1434 # this is the set of all roots we have to push
1434 # this is the set of all roots we have to push
1435 if heads:
1435 if heads:
1436 return subset, list(updated_heads)
1436 return subset, list(updated_heads)
1437 else:
1437 else:
1438 return subset
1438 return subset
1439
1439
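findoutgoing() prunes everything reachable from the common bases and keeps the nodes whose parents were all pruned. A companion sketch, assuming other is a remote repository handle as in the findcommonincoming sketch above:

    base = {}
    repo.findincoming(other, base=base)        # fills base with common nodes
    outroots = repo.findoutgoing(other, base=base)
    repo.ui.write("%d outgoing roots\n" % len(outroots))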
1440 def pull(self, remote, heads=None, force=False):
1440 def pull(self, remote, heads=None, force=False):
1441 lock = self.lock()
1441 lock = self.lock()
1442 try:
1442 try:
1443 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1443 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1444 force=force)
1444 force=force)
1445 if fetch == [nullid]:
1445 if fetch == [nullid]:
1446 self.ui.status(_("requesting all changes\n"))
1446 self.ui.status(_("requesting all changes\n"))
1447
1447
1448 if not fetch:
1448 if not fetch:
1449 self.ui.status(_("no changes found\n"))
1449 self.ui.status(_("no changes found\n"))
1450 return 0
1450 return 0
1451
1451
1452 if heads is None and remote.capable('changegroupsubset'):
1452 if heads is None and remote.capable('changegroupsubset'):
1453 heads = rheads
1453 heads = rheads
1454
1454
1455 if heads is None:
1455 if heads is None:
1456 cg = remote.changegroup(fetch, 'pull')
1456 cg = remote.changegroup(fetch, 'pull')
1457 else:
1457 else:
1458 if not remote.capable('changegroupsubset'):
1458 if not remote.capable('changegroupsubset'):
1459 raise util.Abort(_("Partial pull cannot be done because "
1459 raise util.Abort(_("Partial pull cannot be done because "
1460 "other repository doesn't support "
1460 "other repository doesn't support "
1461 "changegroupsubset."))
1461 "changegroupsubset."))
1462 cg = remote.changegroupsubset(fetch, heads, 'pull')
1462 cg = remote.changegroupsubset(fetch, heads, 'pull')
1463 return self.addchangegroup(cg, 'pull', remote.url())
1463 return self.addchangegroup(cg, 'pull', remote.url())
1464 finally:
1464 finally:
1465 lock.release()
1465 lock.release()
1466
1466
1467 def push(self, remote, force=False, revs=None):
1467 def push(self, remote, force=False, revs=None):
1468 # there are two ways to push to remote repo:
1468 # there are two ways to push to remote repo:
1469 #
1469 #
1470 # addchangegroup assumes local user can lock remote
1470 # addchangegroup assumes local user can lock remote
1471 # repo (local filesystem, old ssh servers).
1471 # repo (local filesystem, old ssh servers).
1472 #
1472 #
1473 # unbundle assumes local user cannot lock remote repo (new ssh
1473 # unbundle assumes local user cannot lock remote repo (new ssh
1474 # servers, http servers).
1474 # servers, http servers).
1475
1475
1476 if remote.capable('unbundle'):
1476 if remote.capable('unbundle'):
1477 return self.push_unbundle(remote, force, revs)
1477 return self.push_unbundle(remote, force, revs)
1478 return self.push_addchangegroup(remote, force, revs)
1478 return self.push_addchangegroup(remote, force, revs)
1479
1479
1480 def prepush(self, remote, force, revs):
1480 def prepush(self, remote, force, revs):
1481 '''Analyze the local and remote repositories and determine which
1481 '''Analyze the local and remote repositories and determine which
1482 changesets need to be pushed to the remote. Return a tuple
1482 changesets need to be pushed to the remote. Return a tuple
1483 (changegroup, remoteheads). changegroup is a readable file-like
1483 (changegroup, remoteheads). changegroup is a readable file-like
1484 object whose read() returns successive changegroup chunks ready to
1484 object whose read() returns successive changegroup chunks ready to
1485 be sent over the wire. remoteheads is the list of remote heads.
1485 be sent over the wire. remoteheads is the list of remote heads.
1486 '''
1486 '''
1487 common = {}
1487 common = {}
1488 remote_heads = remote.heads()
1488 remote_heads = remote.heads()
1489 inc = self.findincoming(remote, common, remote_heads, force=force)
1489 inc = self.findincoming(remote, common, remote_heads, force=force)
1490
1490
1491 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1491 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1492 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1492 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1493
1493
1494 def checkbranch(lheads, rheads, updatelb):
1494 def checkbranch(lheads, rheads, updatelb, branchname=None):
1495 '''
1495 '''
1496 check whether there are more local heads than remote heads on
1496 check whether there are more local heads than remote heads on
1497 a specific branch.
1497 a specific branch.
1498
1498
1499 lheads: local branch heads
1499 lheads: local branch heads
1500 rheads: remote branch heads
1500 rheads: remote branch heads
1501 updatelb: outgoing local branch bases
1501 updatelb: outgoing local branch bases
1502 '''
1502 '''
1503
1503
1504 warn = 0
1504 warn = 0
1505
1505
1506 if not revs and len(lheads) > len(rheads):
1506 if not revs and len(lheads) > len(rheads):
1507 warn = 1
1507 warn = 1
1508 else:
1508 else:
1509 # add local heads involved in the push
1509 # add local heads involved in the push
1510 updatelheads = [self.changelog.heads(x, lheads)
1510 updatelheads = [self.changelog.heads(x, lheads)
1511 for x in updatelb]
1511 for x in updatelb]
1512 newheads = set(sum(updatelheads, [])) & set(lheads)
1512 newheads = set(sum(updatelheads, [])) & set(lheads)
1513
1513
1514 if not newheads:
1514 if not newheads:
1515 return True
1515 return True
1516
1516
1517 # add heads we don't have or that are not involved in the push
1517 # add heads we don't have or that are not involved in the push
1518 for r in rheads:
1518 for r in rheads:
1519 if r in self.changelog.nodemap:
1519 if r in self.changelog.nodemap:
1520 desc = self.changelog.heads(r, heads)
1520 desc = self.changelog.heads(r, heads)
1521 l = [h for h in heads if h in desc]
1521 l = [h for h in heads if h in desc]
1522 if not l:
1522 if not l:
1523 newheads.add(r)
1523 newheads.add(r)
1524 else:
1524 else:
1525 newheads.add(r)
1525 newheads.add(r)
1526 if len(newheads) > len(rheads):
1526 if len(newheads) > len(rheads):
1527 warn = 1
1527 warn = 1
1528
1528
1529 if warn:
1529 if warn:
1530 self.ui.warn(_("abort: push creates new remote heads!\n"))
1530 if branchname is not None:
1531 self.ui.status(_("(did you forget to merge?"
1531 msg = _("abort: push creates new remote heads"
1532 " use push -f to force)\n"))
1532 " on branch '%s'!\n") % branchname
1533 else:
1534 msg = _("abort: push creates new remote heads!\n")
1535 self.ui.warn(msg)
1536 if len(lheads) > len(rheads):
1537 self.ui.status(_("(did you forget to merge?"
1538 " use push -f to force)\n"))
1539 else:
1540 self.ui.status(_("(you should pull and merge or"
1541 " use push -f to force)\n"))
1533 return False
1542 return False
1534 return True
1543 return True
1535
1544
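The net effect of the new lines in checkbranch above: when a branch name is known the abort message names it, and the hint distinguishes the case where the local side simply has more heads from the case where remote heads unknown locally are involved. Paraphrasing the strings above, the two user-visible outcomes for a hypothetical branch 'stable' would look roughly like:

    abort: push creates new remote heads on branch 'stable'!
    (did you forget to merge? use push -f to force)

    abort: push creates new remote heads on branch 'stable'!
    (you should pull and merge or use push -f to force)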
1536 if not bases:
1545 if not bases:
1537 self.ui.status(_("no changes found\n"))
1546 self.ui.status(_("no changes found\n"))
1538 return None, 1
1547 return None, 1
1539 elif not force:
1548 elif not force:
1540 # Check for each named branch if we're creating new remote heads.
1549 # Check for each named branch if we're creating new remote heads.
1541 # To be a remote head after push, node must be either:
1550 # To be a remote head after push, node must be either:
1542 # - unknown locally
1551 # - unknown locally
1543 # - a local outgoing head descended from update
1552 # - a local outgoing head descended from update
1544 # - a remote head that's known locally and not
1553 # - a remote head that's known locally and not
1545 # ancestral to an outgoing head
1554 # ancestral to an outgoing head
1546 #
1555 #
1547 # New named branches cannot be created without --force.
1556 # New named branches cannot be created without --force.
1548
1557
1549 if remote_heads != [nullid]:
1558 if remote_heads != [nullid]:
1550 if remote.capable('branchmap'):
1559 if remote.capable('branchmap'):
1551 remotebrheads = remote.branchmap()
1560 remotebrheads = remote.branchmap()
1552
1561
1553 if not revs:
1562 if not revs:
1554 localbrheads = self.branchmap()
1563 localbrheads = self.branchmap()
1555 else:
1564 else:
1556 localbrheads = {}
1565 localbrheads = {}
1557 for n in heads:
1566 for n in heads:
1558 branch = self[n].branch()
1567 branch = self[n].branch()
1559 localbrheads.setdefault(branch, []).append(n)
1568 localbrheads.setdefault(branch, []).append(n)
1560
1569
1561 newbranches = list(set(localbrheads) - set(remotebrheads))
1570 newbranches = list(set(localbrheads) - set(remotebrheads))
1562 if newbranches: # new branch requires --force
1571 if newbranches: # new branch requires --force
1563 branchnames = ', '.join("%s" % b for b in newbranches)
1572 branchnames = ', '.join("%s" % b for b in newbranches)
1564 self.ui.warn(_("abort: push creates "
1573 self.ui.warn(_("abort: push creates "
1565 "new remote branches: %s!\n")
1574 "new remote branches: %s!\n")
1566 % branchnames)
1575 % branchnames)
1567 # propose 'push -b .' in the msg too?
1576 # propose 'push -b .' in the msg too?
1568 self.ui.status(_("(use 'hg push -f' to force)\n"))
1577 self.ui.status(_("(use 'hg push -f' to force)\n"))
1569 return None, 0
1578 return None, 0
1570 for branch, lheads in localbrheads.iteritems():
1579 for branch, lheads in localbrheads.iteritems():
1571 if branch in remotebrheads:
1580 if branch in remotebrheads:
1572 rheads = remotebrheads[branch]
1581 rheads = remotebrheads[branch]
1573 if not checkbranch(lheads, rheads, update):
1582 if not checkbranch(lheads, rheads, update, branch):
1574 return None, 0
1583 return None, 0
1575 else:
1584 else:
1576 if not checkbranch(heads, remote_heads, update):
1585 if not checkbranch(heads, remote_heads, update):
1577 return None, 0
1586 return None, 0
1578
1587
1579 if inc:
1588 if inc:
1580 self.ui.warn(_("note: unsynced remote changes!\n"))
1589 self.ui.warn(_("note: unsynced remote changes!\n"))
1581
1590
1582
1591
1583 if revs is None:
1592 if revs is None:
1584 # use the fast path, no race possible on push
1593 # use the fast path, no race possible on push
1585 nodes = self.changelog.findmissing(common.keys())
1594 nodes = self.changelog.findmissing(common.keys())
1586 cg = self._changegroup(nodes, 'push')
1595 cg = self._changegroup(nodes, 'push')
1587 else:
1596 else:
1588 cg = self.changegroupsubset(update, revs, 'push')
1597 cg = self.changegroupsubset(update, revs, 'push')
1589 return cg, remote_heads
1598 return cg, remote_heads
1590
1599
1591 def push_addchangegroup(self, remote, force, revs):
1600 def push_addchangegroup(self, remote, force, revs):
1592 lock = remote.lock()
1601 lock = remote.lock()
1593 try:
1602 try:
1594 ret = self.prepush(remote, force, revs)
1603 ret = self.prepush(remote, force, revs)
1595 if ret[0] is not None:
1604 if ret[0] is not None:
1596 cg, remote_heads = ret
1605 cg, remote_heads = ret
1597 return remote.addchangegroup(cg, 'push', self.url())
1606 return remote.addchangegroup(cg, 'push', self.url())
1598 return ret[1]
1607 return ret[1]
1599 finally:
1608 finally:
1600 lock.release()
1609 lock.release()
1601
1610
1602 def push_unbundle(self, remote, force, revs):
1611 def push_unbundle(self, remote, force, revs):
1603 # local repo finds heads on server, finds out what revs it
1612 # local repo finds heads on server, finds out what revs it
1604 # must push. once revs transferred, if server finds it has
1613 # must push. once revs transferred, if server finds it has
1605 # different heads (someone else won commit/push race), server
1614 # different heads (someone else won commit/push race), server
1606 # aborts.
1615 # aborts.
1607
1616
1608 ret = self.prepush(remote, force, revs)
1617 ret = self.prepush(remote, force, revs)
1609 if ret[0] is not None:
1618 if ret[0] is not None:
1610 cg, remote_heads = ret
1619 cg, remote_heads = ret
1611 if force:
1620 if force:
1612 remote_heads = ['force']
1621 remote_heads = ['force']
1613 return remote.unbundle(cg, remote_heads, 'push')
1622 return remote.unbundle(cg, remote_heads, 'push')
1614 return ret[1]
1623 return ret[1]
1615
1624
1616 def changegroupinfo(self, nodes, source):
1625 def changegroupinfo(self, nodes, source):
1617 if self.ui.verbose or source == 'bundle':
1626 if self.ui.verbose or source == 'bundle':
1618 self.ui.status(_("%d changesets found\n") % len(nodes))
1627 self.ui.status(_("%d changesets found\n") % len(nodes))
1619 if self.ui.debugflag:
1628 if self.ui.debugflag:
1620 self.ui.debug("list of changesets:\n")
1629 self.ui.debug("list of changesets:\n")
1621 for node in nodes:
1630 for node in nodes:
1622 self.ui.debug("%s\n" % hex(node))
1631 self.ui.debug("%s\n" % hex(node))
1623
1632
1624 def changegroupsubset(self, bases, heads, source, extranodes=None):
1633 def changegroupsubset(self, bases, heads, source, extranodes=None):
1625 """Compute a changegroup consisting of all the nodes that are
1634 """Compute a changegroup consisting of all the nodes that are
1626 descendants of any of the bases and ancestors of any of the heads.
1635 descendants of any of the bases and ancestors of any of the heads.
1627 Return a chunkbuffer object whose read() method will return
1636 Return a chunkbuffer object whose read() method will return
1628 successive changegroup chunks.
1637 successive changegroup chunks.
1629
1638
1630 It is fairly complex as determining which filenodes and which
1639 It is fairly complex as determining which filenodes and which
1631 manifest nodes need to be included for the changeset to be complete
1640 manifest nodes need to be included for the changeset to be complete
1632 is non-trivial.
1641 is non-trivial.
1633
1642
1634 Another wrinkle is doing the reverse, figuring out which changeset in
1643 Another wrinkle is doing the reverse, figuring out which changeset in
1635 the changegroup a particular filenode or manifestnode belongs to.
1644 the changegroup a particular filenode or manifestnode belongs to.
1636
1645
1637 The caller can specify some nodes that must be included in the
1646 The caller can specify some nodes that must be included in the
1638 changegroup using the extranodes argument. It should be a dict
1647 changegroup using the extranodes argument. It should be a dict
1639 where the keys are the filenames (or 1 for the manifest), and the
1648 where the keys are the filenames (or 1 for the manifest), and the
1640 values are lists of (node, linknode) tuples, where node is a wanted
1649 values are lists of (node, linknode) tuples, where node is a wanted
1641 node and linknode is the changelog node that should be transmitted as
1650 node and linknode is the changelog node that should be transmitted as
1642 the linkrev.
1651 the linkrev.
1643 """
1652 """
1644
1653
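A sketch of the extranodes shape described in the docstring above; the file name is hypothetical and the block is illustrative only, not part of changegroupsubset:

    from mercurial.node import nullid

    cl1 = repo.changelog.tip()                   # linkrev target changeset
    fl = repo.file('foo/bar.txt')                # hypothetical tracked file
    extranodes = {
        'foo/bar.txt': [(fl.tip(), cl1)],        # ship this filenode, linked to cl1
        1: [(repo.manifest.tip(), cl1)],         # the key 1 means the manifest revlog
    }
    cg = repo.changegroupsubset([nullid], [cl1], 'push', extranodes=extranodes)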
1645 # Set up some initial variables
1654 # Set up some initial variables
1646 # Make it easy to refer to self.changelog
1655 # Make it easy to refer to self.changelog
1647 cl = self.changelog
1656 cl = self.changelog
1648 # msng is short for missing - compute the list of changesets in this
1657 # msng is short for missing - compute the list of changesets in this
1649 # changegroup.
1658 # changegroup.
1650 if not bases:
1659 if not bases:
1651 bases = [nullid]
1660 bases = [nullid]
1652 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1661 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1653
1662
1654 if extranodes is None:
1663 if extranodes is None:
1655 # can we go through the fast path ?
1664 # can we go through the fast path ?
1656 heads.sort()
1665 heads.sort()
1657 allheads = self.heads()
1666 allheads = self.heads()
1658 allheads.sort()
1667 allheads.sort()
1659 if heads == allheads:
1668 if heads == allheads:
1660 return self._changegroup(msng_cl_lst, source)
1669 return self._changegroup(msng_cl_lst, source)
1661
1670
1662 # slow path
1671 # slow path
1663 self.hook('preoutgoing', throw=True, source=source)
1672 self.hook('preoutgoing', throw=True, source=source)
1664
1673
1665 self.changegroupinfo(msng_cl_lst, source)
1674 self.changegroupinfo(msng_cl_lst, source)
1666 # Some bases may turn out to be superfluous, and some heads may be
1675 # Some bases may turn out to be superfluous, and some heads may be
1667 # too. nodesbetween will return the minimal set of bases and heads
1676 # too. nodesbetween will return the minimal set of bases and heads
1668 # necessary to re-create the changegroup.
1677 # necessary to re-create the changegroup.
1669
1678
1670 # Known heads are the list of heads that it is assumed the recipient
1679 # Known heads are the list of heads that it is assumed the recipient
1671 # of this changegroup will know about.
1680 # of this changegroup will know about.
1672 knownheads = set()
1681 knownheads = set()
1673 # We assume that all parents of bases are known heads.
1682 # We assume that all parents of bases are known heads.
1674 for n in bases:
1683 for n in bases:
1675 knownheads.update(cl.parents(n))
1684 knownheads.update(cl.parents(n))
1676 knownheads.discard(nullid)
1685 knownheads.discard(nullid)
1677 knownheads = list(knownheads)
1686 knownheads = list(knownheads)
1678 if knownheads:
1687 if knownheads:
1679 # Now that we know what heads are known, we can compute which
1688 # Now that we know what heads are known, we can compute which
1680 # changesets are known. The recipient must know about all
1689 # changesets are known. The recipient must know about all
1681 # changesets required to reach the known heads from the null
1690 # changesets required to reach the known heads from the null
1682 # changeset.
1691 # changeset.
1683 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1692 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1684 junk = None
1693 junk = None
1685 # Transform the list into a set.
1694 # Transform the list into a set.
1686 has_cl_set = set(has_cl_set)
1695 has_cl_set = set(has_cl_set)
1687 else:
1696 else:
1688 # If there were no known heads, the recipient cannot be assumed to
1697 # If there were no known heads, the recipient cannot be assumed to
1689 # know about any changesets.
1698 # know about any changesets.
1690 has_cl_set = set()
1699 has_cl_set = set()
1691
1700
1692 # Make it easy to refer to self.manifest
1701 # Make it easy to refer to self.manifest
1693 mnfst = self.manifest
1702 mnfst = self.manifest
1694 # We don't know which manifests are missing yet
1703 # We don't know which manifests are missing yet
1695 msng_mnfst_set = {}
1704 msng_mnfst_set = {}
1696 # Nor do we know which filenodes are missing.
1705 # Nor do we know which filenodes are missing.
1697 msng_filenode_set = {}
1706 msng_filenode_set = {}
1698
1707
1699 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1708 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1700 junk = None
1709 junk = None
1701
1710
1702 # A changeset always belongs to itself, so the changenode lookup
1711 # A changeset always belongs to itself, so the changenode lookup
1703 # function for a changenode is identity.
1712 # function for a changenode is identity.
1704 def identity(x):
1713 def identity(x):
1705 return x
1714 return x
1706
1715
1707 # If we determine that a particular file or manifest node must be a
1716 # If we determine that a particular file or manifest node must be a
1708 # node that the recipient of the changegroup will already have, we can
1717 # node that the recipient of the changegroup will already have, we can
1709 # also assume the recipient will have all the parents. This function
1718 # also assume the recipient will have all the parents. This function
1710 # prunes them from the set of missing nodes.
1719 # prunes them from the set of missing nodes.
1711 def prune_parents(revlog, hasset, msngset):
1720 def prune_parents(revlog, hasset, msngset):
1712 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1721 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1713 msngset.pop(revlog.node(r), None)
1722 msngset.pop(revlog.node(r), None)
1714
1723
1715 # Use the information collected by the changegroup collector to say
1724 # Use the information collected by the changegroup collector to say
1716 # which changenode any manifestnode belongs to.
1725 # which changenode any manifestnode belongs to.
1717 def lookup_manifest_link(mnfstnode):
1726 def lookup_manifest_link(mnfstnode):
1718 return msng_mnfst_set[mnfstnode]
1727 return msng_mnfst_set[mnfstnode]
1719
1728
1720 # A function generating function that sets up the initial environment
1729 # A function generating function that sets up the initial environment
1721 # for the inner function.
1730 # for the inner function.
1722 def filenode_collector(changedfiles):
1731 def filenode_collector(changedfiles):
1723 # This gathers information from each manifestnode included in the
1732 # This gathers information from each manifestnode included in the
1724 # changegroup about which filenodes the manifest node references
1733 # changegroup about which filenodes the manifest node references
1725 # so we can include those in the changegroup too.
1734 # so we can include those in the changegroup too.
1726 #
1735 #
1727 # It also remembers which changenode each filenode belongs to. It
1736 # It also remembers which changenode each filenode belongs to. It
1728 # does this by assuming that a filenode belongs to the changenode
1737 # does this by assuming that a filenode belongs to the changenode
1729 # that the first manifest referencing it belongs to.
1738 # that the first manifest referencing it belongs to.
1730 def collect_msng_filenodes(mnfstnode):
1739 def collect_msng_filenodes(mnfstnode):
1731 r = mnfst.rev(mnfstnode)
1740 r = mnfst.rev(mnfstnode)
1732 if r - 1 in mnfst.parentrevs(r):
1741 if r - 1 in mnfst.parentrevs(r):
1733 # If the previous rev is one of the parents,
1742 # If the previous rev is one of the parents,
1734 # we only need to see a diff.
1743 # we only need to see a diff.
1735 deltamf = mnfst.readdelta(mnfstnode)
1744 deltamf = mnfst.readdelta(mnfstnode)
1736 # For each line in the delta
1745 # For each line in the delta
1737 for f, fnode in deltamf.iteritems():
1746 for f, fnode in deltamf.iteritems():
1738 f = changedfiles.get(f, None)
1747 f = changedfiles.get(f, None)
1739 # And if the file is in the list of files we care
1748 # And if the file is in the list of files we care
1740 # about.
1749 # about.
1741 if f is not None:
1750 if f is not None:
1742 # Get the changenode this manifest belongs to
1751 # Get the changenode this manifest belongs to
1743 clnode = msng_mnfst_set[mnfstnode]
1752 clnode = msng_mnfst_set[mnfstnode]
1744 # Create the set of filenodes for the file if
1753 # Create the set of filenodes for the file if
1745 # there isn't one already.
1754 # there isn't one already.
1746 ndset = msng_filenode_set.setdefault(f, {})
1755 ndset = msng_filenode_set.setdefault(f, {})
1747 # And set the filenode's changelog node to the
1756 # And set the filenode's changelog node to the
1748 # manifest's if it hasn't been set already.
1757 # manifest's if it hasn't been set already.
1749 ndset.setdefault(fnode, clnode)
1758 ndset.setdefault(fnode, clnode)
1750 else:
1759 else:
1751 # Otherwise we need a full manifest.
1760 # Otherwise we need a full manifest.
1752 m = mnfst.read(mnfstnode)
1761 m = mnfst.read(mnfstnode)
1753 # For every file we care about.
1762 # For every file we care about.
1754 for f in changedfiles:
1763 for f in changedfiles:
1755 fnode = m.get(f, None)
1764 fnode = m.get(f, None)
1756 # If it's in the manifest
1765 # If it's in the manifest
1757 if fnode is not None:
1766 if fnode is not None:
1758 # See comments above.
1767 # See comments above.
1759 clnode = msng_mnfst_set[mnfstnode]
1768 clnode = msng_mnfst_set[mnfstnode]
1760 ndset = msng_filenode_set.setdefault(f, {})
1769 ndset = msng_filenode_set.setdefault(f, {})
1761 ndset.setdefault(fnode, clnode)
1770 ndset.setdefault(fnode, clnode)
1762 return collect_msng_filenodes
1771 return collect_msng_filenodes
1763
1772
1764 # We have a list of filenodes we think we need for a file; let's remove
1773 # We have a list of filenodes we think we need for a file; let's remove
1765 # all those we know the recipient must have.
1774 # all those we know the recipient must have.
1766 def prune_filenodes(f, filerevlog):
1775 def prune_filenodes(f, filerevlog):
1767 msngset = msng_filenode_set[f]
1776 msngset = msng_filenode_set[f]
1768 hasset = set()
1777 hasset = set()
1769 # If a 'missing' filenode thinks it belongs to a changenode we
1778 # If a 'missing' filenode thinks it belongs to a changenode we
1770 # assume the recipient must have, then the recipient must have
1779 # assume the recipient must have, then the recipient must have
1771 # that filenode.
1780 # that filenode.
1772 for n in msngset:
1781 for n in msngset:
1773 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1782 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1774 if clnode in has_cl_set:
1783 if clnode in has_cl_set:
1775 hasset.add(n)
1784 hasset.add(n)
1776 prune_parents(filerevlog, hasset, msngset)
1785 prune_parents(filerevlog, hasset, msngset)
1777
1786
1778 # A function generator function that sets up the context for the
1787 # A function generator function that sets up the context for the
1779 # inner function.
1788 # inner function.
1780 def lookup_filenode_link_func(fname):
1789 def lookup_filenode_link_func(fname):
1781 msngset = msng_filenode_set[fname]
1790 msngset = msng_filenode_set[fname]
1782 # Lookup the changenode the filenode belongs to.
1791 # Lookup the changenode the filenode belongs to.
1783 def lookup_filenode_link(fnode):
1792 def lookup_filenode_link(fnode):
1784 return msngset[fnode]
1793 return msngset[fnode]
1785 return lookup_filenode_link
1794 return lookup_filenode_link
1786
1795
1787 # Add the nodes that were explicitly requested.
1796 # Add the nodes that were explicitly requested.
1788 def add_extra_nodes(name, nodes):
1797 def add_extra_nodes(name, nodes):
1789 if not extranodes or name not in extranodes:
1798 if not extranodes or name not in extranodes:
1790 return
1799 return
1791
1800
1792 for node, linknode in extranodes[name]:
1801 for node, linknode in extranodes[name]:
1793 if node not in nodes:
1802 if node not in nodes:
1794 nodes[node] = linknode
1803 nodes[node] = linknode
1795
1804
1796 # Now that we have all theses utility functions to help out and
1805 # Now that we have all theses utility functions to help out and
1797 # logically divide up the task, generate the group.
1806 # logically divide up the task, generate the group.
1798 def gengroup():
1807 def gengroup():
1799 # The set of changed files starts empty.
1808 # The set of changed files starts empty.
1800 changedfiles = {}
1809 changedfiles = {}
1801 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1810 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1802
1811
1803 # Create a changenode group generator that will call our functions
1812 # Create a changenode group generator that will call our functions
1804 # back to lookup the owning changenode and collect information.
1813 # back to lookup the owning changenode and collect information.
1805 group = cl.group(msng_cl_lst, identity, collect)
1814 group = cl.group(msng_cl_lst, identity, collect)
1806 for chnk in group:
1815 for chnk in group:
1807 yield chnk
1816 yield chnk
1808
1817
1809 # Figure out which manifest nodes (of the ones we think might be
1818 # Figure out which manifest nodes (of the ones we think might be
1810 # part of the changegroup) the recipient must know about and
1819 # part of the changegroup) the recipient must know about and
1811 # remove them from the changegroup.
1820 # remove them from the changegroup.
1812 has_mnfst_set = set()
1821 has_mnfst_set = set()
1813 for n in msng_mnfst_set:
1822 for n in msng_mnfst_set:
1814 # If a 'missing' manifest thinks it belongs to a changenode
1823 # If a 'missing' manifest thinks it belongs to a changenode
1815 # the recipient is assumed to have, obviously the recipient
1824 # the recipient is assumed to have, obviously the recipient
1816 # must have that manifest.
1825 # must have that manifest.
1817 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1826 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1818 if linknode in has_cl_set:
1827 if linknode in has_cl_set:
1819 has_mnfst_set.add(n)
1828 has_mnfst_set.add(n)
1820 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1829 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1821 add_extra_nodes(1, msng_mnfst_set)
1830 add_extra_nodes(1, msng_mnfst_set)
1822 msng_mnfst_lst = msng_mnfst_set.keys()
1831 msng_mnfst_lst = msng_mnfst_set.keys()
1823 # Sort the manifestnodes by revision number.
1832 # Sort the manifestnodes by revision number.
1824 msng_mnfst_lst.sort(key=mnfst.rev)
1833 msng_mnfst_lst.sort(key=mnfst.rev)
1825 # Create a generator for the manifestnodes that calls our lookup
1834 # Create a generator for the manifestnodes that calls our lookup
1826 # and data collection functions back.
1835 # and data collection functions back.
1827 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1836 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1828 filenode_collector(changedfiles))
1837 filenode_collector(changedfiles))
1829 for chnk in group:
1838 for chnk in group:
1830 yield chnk
1839 yield chnk
1831
1840
1832 # These are no longer needed, dereference and toss the memory for
1841 # These are no longer needed, dereference and toss the memory for
1833 # them.
1842 # them.
1834 msng_mnfst_lst = None
1843 msng_mnfst_lst = None
1835 msng_mnfst_set.clear()
1844 msng_mnfst_set.clear()
1836
1845
1837 if extranodes:
1846 if extranodes:
1838 for fname in extranodes:
1847 for fname in extranodes:
1839 if isinstance(fname, int):
1848 if isinstance(fname, int):
1840 continue
1849 continue
1841 msng_filenode_set.setdefault(fname, {})
1850 msng_filenode_set.setdefault(fname, {})
1842 changedfiles[fname] = 1
1851 changedfiles[fname] = 1
1843 # Go through all our files in order sorted by name.
1852 # Go through all our files in order sorted by name.
1844 for fname in sorted(changedfiles):
1853 for fname in sorted(changedfiles):
1845 filerevlog = self.file(fname)
1854 filerevlog = self.file(fname)
1846 if not len(filerevlog):
1855 if not len(filerevlog):
1847 raise util.Abort(_("empty or missing revlog for %s") % fname)
1856 raise util.Abort(_("empty or missing revlog for %s") % fname)
1848 # Toss out the filenodes that the recipient isn't really
1857 # Toss out the filenodes that the recipient isn't really
1849 # missing.
1858 # missing.
1850 if fname in msng_filenode_set:
1859 if fname in msng_filenode_set:
1851 prune_filenodes(fname, filerevlog)
1860 prune_filenodes(fname, filerevlog)
1852 add_extra_nodes(fname, msng_filenode_set[fname])
1861 add_extra_nodes(fname, msng_filenode_set[fname])
1853 msng_filenode_lst = msng_filenode_set[fname].keys()
1862 msng_filenode_lst = msng_filenode_set[fname].keys()
1854 else:
1863 else:
1855 msng_filenode_lst = []
1864 msng_filenode_lst = []
1856 # If any filenodes are left, generate the group for them,
1865 # If any filenodes are left, generate the group for them,
1857 # otherwise don't bother.
1866 # otherwise don't bother.
1858 if len(msng_filenode_lst) > 0:
1867 if len(msng_filenode_lst) > 0:
1859 yield changegroup.chunkheader(len(fname))
1868 yield changegroup.chunkheader(len(fname))
1860 yield fname
1869 yield fname
1861 # Sort the filenodes by their revision #
1870 # Sort the filenodes by their revision #
1862 msng_filenode_lst.sort(key=filerevlog.rev)
1871 msng_filenode_lst.sort(key=filerevlog.rev)
1863 # Create a group generator and only pass in a changenode
1872 # Create a group generator and only pass in a changenode
1864 # lookup function as we need to collect no information
1873 # lookup function as we need to collect no information
1865 # from filenodes.
1874 # from filenodes.
1866 group = filerevlog.group(msng_filenode_lst,
1875 group = filerevlog.group(msng_filenode_lst,
1867 lookup_filenode_link_func(fname))
1876 lookup_filenode_link_func(fname))
1868 for chnk in group:
1877 for chnk in group:
1869 yield chnk
1878 yield chnk
1870 if fname in msng_filenode_set:
1879 if fname in msng_filenode_set:
1871 # Don't need this anymore, toss it to free memory.
1880 # Don't need this anymore, toss it to free memory.
1872 del msng_filenode_set[fname]
1881 del msng_filenode_set[fname]
1873 # Signal that no more groups are left.
1882 # Signal that no more groups are left.
1874 yield changegroup.closechunk()
1883 yield changegroup.closechunk()
1875
1884
1876 if msng_cl_lst:
1885 if msng_cl_lst:
1877 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1886 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1878
1887
1879 return util.chunkbuffer(gengroup())
1888 return util.chunkbuffer(gengroup())
1880
1889
1881 def changegroup(self, basenodes, source):
1890 def changegroup(self, basenodes, source):
1882 # to avoid a race we use changegroupsubset() (issue1320)
1891 # to avoid a race we use changegroupsubset() (issue1320)
1883 return self.changegroupsubset(basenodes, self.heads(), source)
1892 return self.changegroupsubset(basenodes, self.heads(), source)
1884
1893
1885 def _changegroup(self, nodes, source):
1894 def _changegroup(self, nodes, source):
1886 """Compute the changegroup of all nodes that we have that a recipient
1895 """Compute the changegroup of all nodes that we have that a recipient
1887 doesn't. Return a chunkbuffer object whose read() method will return
1896 doesn't. Return a chunkbuffer object whose read() method will return
1888 successive changegroup chunks.
1897 successive changegroup chunks.
1889
1898
1890 This is much easier than the previous function as we can assume that
1899 This is much easier than the previous function as we can assume that
1891 the recipient has any changenode we aren't sending them.
1900 the recipient has any changenode we aren't sending them.
1892
1901
1893 nodes is the set of nodes to send"""
1902 nodes is the set of nodes to send"""
1894
1903
1895 self.hook('preoutgoing', throw=True, source=source)
1904 self.hook('preoutgoing', throw=True, source=source)
1896
1905
1897 cl = self.changelog
1906 cl = self.changelog
1898 revset = set([cl.rev(n) for n in nodes])
1907 revset = set([cl.rev(n) for n in nodes])
1899 self.changegroupinfo(nodes, source)
1908 self.changegroupinfo(nodes, source)
1900
1909
1901 def identity(x):
1910 def identity(x):
1902 return x
1911 return x
1903
1912
1904 def gennodelst(log):
1913 def gennodelst(log):
1905 for r in log:
1914 for r in log:
1906 if log.linkrev(r) in revset:
1915 if log.linkrev(r) in revset:
1907 yield log.node(r)
1916 yield log.node(r)
1908
1917
1909 def lookuprevlink_func(revlog):
1918 def lookuprevlink_func(revlog):
1910 def lookuprevlink(n):
1919 def lookuprevlink(n):
1911 return cl.node(revlog.linkrev(revlog.rev(n)))
1920 return cl.node(revlog.linkrev(revlog.rev(n)))
1912 return lookuprevlink
1921 return lookuprevlink
1913
1922
1914 def gengroup():
1923 def gengroup():
1915 '''yield a sequence of changegroup chunks (strings)'''
1924 '''yield a sequence of changegroup chunks (strings)'''
1916 # construct a list of all changed files
1925 # construct a list of all changed files
1917 changedfiles = {}
1926 changedfiles = {}
1918 mmfs = {}
1927 mmfs = {}
1919 collect = changegroup.collector(cl, mmfs, changedfiles)
1928 collect = changegroup.collector(cl, mmfs, changedfiles)
1920
1929
1921 for chnk in cl.group(nodes, identity, collect):
1930 for chnk in cl.group(nodes, identity, collect):
1922 yield chnk
1931 yield chnk
1923
1932
1924 mnfst = self.manifest
1933 mnfst = self.manifest
1925 nodeiter = gennodelst(mnfst)
1934 nodeiter = gennodelst(mnfst)
1926 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1935 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1927 yield chnk
1936 yield chnk
1928
1937
1929 for fname in sorted(changedfiles):
1938 for fname in sorted(changedfiles):
1930 filerevlog = self.file(fname)
1939 filerevlog = self.file(fname)
1931 if not len(filerevlog):
1940 if not len(filerevlog):
1932 raise util.Abort(_("empty or missing revlog for %s") % fname)
1941 raise util.Abort(_("empty or missing revlog for %s") % fname)
1933 nodeiter = gennodelst(filerevlog)
1942 nodeiter = gennodelst(filerevlog)
1934 nodeiter = list(nodeiter)
1943 nodeiter = list(nodeiter)
1935 if nodeiter:
1944 if nodeiter:
1936 yield changegroup.chunkheader(len(fname))
1945 yield changegroup.chunkheader(len(fname))
1937 yield fname
1946 yield fname
1938 lookup = lookuprevlink_func(filerevlog)
1947 lookup = lookuprevlink_func(filerevlog)
1939 for chnk in filerevlog.group(nodeiter, lookup):
1948 for chnk in filerevlog.group(nodeiter, lookup):
1940 yield chnk
1949 yield chnk
1941
1950
1942 yield changegroup.closechunk()
1951 yield changegroup.closechunk()
1943
1952
1944 if nodes:
1953 if nodes:
1945 self.hook('outgoing', node=hex(nodes[0]), source=source)
1954 self.hook('outgoing', node=hex(nodes[0]), source=source)
1946
1955
1947 return util.chunkbuffer(gengroup())
1956 return util.chunkbuffer(gengroup())
1948
1957
1949 def addchangegroup(self, source, srctype, url, emptyok=False):
1958 def addchangegroup(self, source, srctype, url, emptyok=False):
1950 """add changegroup to repo.
1959 """add changegroup to repo.
1951
1960
1952 return values:
1961 return values:
1953 - nothing changed or no source: 0
1962 - nothing changed or no source: 0
1954 - more heads than before: 1+added heads (2..n)
1963 - more heads than before: 1+added heads (2..n)
1955 - fewer heads than before: -1-removed heads (-2..-n)
1964 - fewer heads than before: -1-removed heads (-2..-n)
1956 - number of heads stays the same: 1
1965 - number of heads stays the same: 1
1957 """
1966 """
1958 def csmap(x):
1967 def csmap(x):
1959 self.ui.debug("add changeset %s\n" % short(x))
1968 self.ui.debug("add changeset %s\n" % short(x))
1960 return len(cl)
1969 return len(cl)
1961
1970
1962 def revmap(x):
1971 def revmap(x):
1963 return cl.rev(x)
1972 return cl.rev(x)
1964
1973
1965 if not source:
1974 if not source:
1966 return 0
1975 return 0
1967
1976
1968 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1977 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1969
1978
1970 changesets = files = revisions = 0
1979 changesets = files = revisions = 0
1971
1980
1972 # write changelog data to temp files so concurrent readers will not see
1981 # write changelog data to temp files so concurrent readers will not see
1973 # inconsistent view
1982 # inconsistent view
1974 cl = self.changelog
1983 cl = self.changelog
1975 cl.delayupdate()
1984 cl.delayupdate()
1976 oldheads = len(cl.heads())
1985 oldheads = len(cl.heads())
1977
1986
1978 tr = self.transaction()
1987 tr = self.transaction()
1979 try:
1988 try:
1980 trp = weakref.proxy(tr)
1989 trp = weakref.proxy(tr)
1981 # pull off the changeset group
1990 # pull off the changeset group
1982 self.ui.status(_("adding changesets\n"))
1991 self.ui.status(_("adding changesets\n"))
1983 clstart = len(cl)
1992 clstart = len(cl)
1984 chunkiter = changegroup.chunkiter(source)
1993 chunkiter = changegroup.chunkiter(source)
1985 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1994 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1986 raise util.Abort(_("received changelog group is empty"))
1995 raise util.Abort(_("received changelog group is empty"))
1987 clend = len(cl)
1996 clend = len(cl)
1988 changesets = clend - clstart
1997 changesets = clend - clstart
1989
1998
1990 # pull off the manifest group
1999 # pull off the manifest group
1991 self.ui.status(_("adding manifests\n"))
2000 self.ui.status(_("adding manifests\n"))
1992 chunkiter = changegroup.chunkiter(source)
2001 chunkiter = changegroup.chunkiter(source)
1993 # no need to check for empty manifest group here:
2002 # no need to check for empty manifest group here:
1994 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2003 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1995 # no new manifest will be created and the manifest group will
2004 # no new manifest will be created and the manifest group will
1996 # be empty during the pull
2005 # be empty during the pull
1997 self.manifest.addgroup(chunkiter, revmap, trp)
2006 self.manifest.addgroup(chunkiter, revmap, trp)
1998
2007
1999 # process the files
2008 # process the files
2000 self.ui.status(_("adding file changes\n"))
2009 self.ui.status(_("adding file changes\n"))
2001 while 1:
2010 while 1:
2002 f = changegroup.getchunk(source)
2011 f = changegroup.getchunk(source)
2003 if not f:
2012 if not f:
2004 break
2013 break
2005 self.ui.debug("adding %s revisions\n" % f)
2014 self.ui.debug("adding %s revisions\n" % f)
2006 fl = self.file(f)
2015 fl = self.file(f)
2007 o = len(fl)
2016 o = len(fl)
2008 chunkiter = changegroup.chunkiter(source)
2017 chunkiter = changegroup.chunkiter(source)
2009 if fl.addgroup(chunkiter, revmap, trp) is None:
2018 if fl.addgroup(chunkiter, revmap, trp) is None:
2010 raise util.Abort(_("received file revlog group is empty"))
2019 raise util.Abort(_("received file revlog group is empty"))
2011 revisions += len(fl) - o
2020 revisions += len(fl) - o
2012 files += 1
2021 files += 1
2013
2022
2014 newheads = len(cl.heads())
2023 newheads = len(cl.heads())
2015 heads = ""
2024 heads = ""
2016 if oldheads and newheads != oldheads:
2025 if oldheads and newheads != oldheads:
2017 heads = _(" (%+d heads)") % (newheads - oldheads)
2026 heads = _(" (%+d heads)") % (newheads - oldheads)
2018
2027
2019 self.ui.status(_("added %d changesets"
2028 self.ui.status(_("added %d changesets"
2020 " with %d changes to %d files%s\n")
2029 " with %d changes to %d files%s\n")
2021 % (changesets, revisions, files, heads))
2030 % (changesets, revisions, files, heads))
2022
2031
2023 if changesets > 0:
2032 if changesets > 0:
2024 p = lambda: cl.writepending() and self.root or ""
2033 p = lambda: cl.writepending() and self.root or ""
2025 self.hook('pretxnchangegroup', throw=True,
2034 self.hook('pretxnchangegroup', throw=True,
2026 node=hex(cl.node(clstart)), source=srctype,
2035 node=hex(cl.node(clstart)), source=srctype,
2027 url=url, pending=p)
2036 url=url, pending=p)
2028
2037
2029 # make changelog see real files again
2038 # make changelog see real files again
2030 cl.finalize(trp)
2039 cl.finalize(trp)
2031
2040
2032 tr.close()
2041 tr.close()
2033 finally:
2042 finally:
2034 del tr
2043 del tr
2035
2044
2036 if changesets > 0:
2045 if changesets > 0:
2037 # forcefully update the on-disk branch cache
2046 # forcefully update the on-disk branch cache
2038 self.ui.debug("updating the branch cache\n")
2047 self.ui.debug("updating the branch cache\n")
2039 self.branchtags()
2048 self.branchtags()
2040 self.hook("changegroup", node=hex(cl.node(clstart)),
2049 self.hook("changegroup", node=hex(cl.node(clstart)),
2041 source=srctype, url=url)
2050 source=srctype, url=url)
2042
2051
2043 for i in xrange(clstart, clend):
2052 for i in xrange(clstart, clend):
2044 self.hook("incoming", node=hex(cl.node(i)),
2053 self.hook("incoming", node=hex(cl.node(i)),
2045 source=srctype, url=url)
2054 source=srctype, url=url)
2046
2055
2047 # never return 0 here:
2056 # never return 0 here:
2048 if newheads < oldheads:
2057 if newheads < oldheads:
2049 return newheads - oldheads - 1
2058 return newheads - oldheads - 1
2050 else:
2059 else:
2051 return newheads - oldheads + 1
2060 return newheads - oldheads + 1
2052
2061
2053
2062
2054 def stream_in(self, remote):
2063 def stream_in(self, remote):
2055 fp = remote.stream_out()
2064 fp = remote.stream_out()
2056 l = fp.readline()
2065 l = fp.readline()
2057 try:
2066 try:
2058 resp = int(l)
2067 resp = int(l)
2059 except ValueError:
2068 except ValueError:
2060 raise error.ResponseError(
2069 raise error.ResponseError(
2061 _('Unexpected response from remote server:'), l)
2070 _('Unexpected response from remote server:'), l)
2062 if resp == 1:
2071 if resp == 1:
2063 raise util.Abort(_('operation forbidden by server'))
2072 raise util.Abort(_('operation forbidden by server'))
2064 elif resp == 2:
2073 elif resp == 2:
2065 raise util.Abort(_('locking the remote repository failed'))
2074 raise util.Abort(_('locking the remote repository failed'))
2066 elif resp != 0:
2075 elif resp != 0:
2067 raise util.Abort(_('the server sent an unknown error code'))
2076 raise util.Abort(_('the server sent an unknown error code'))
2068 self.ui.status(_('streaming all changes\n'))
2077 self.ui.status(_('streaming all changes\n'))
2069 l = fp.readline()
2078 l = fp.readline()
2070 try:
2079 try:
2071 total_files, total_bytes = map(int, l.split(' ', 1))
2080 total_files, total_bytes = map(int, l.split(' ', 1))
2072 except (ValueError, TypeError):
2081 except (ValueError, TypeError):
2073 raise error.ResponseError(
2082 raise error.ResponseError(
2074 _('Unexpected response from remote server:'), l)
2083 _('Unexpected response from remote server:'), l)
2075 self.ui.status(_('%d files to transfer, %s of data\n') %
2084 self.ui.status(_('%d files to transfer, %s of data\n') %
2076 (total_files, util.bytecount(total_bytes)))
2085 (total_files, util.bytecount(total_bytes)))
2077 start = time.time()
2086 start = time.time()
2078 for i in xrange(total_files):
2087 for i in xrange(total_files):
2079 # XXX doesn't support '\n' or '\r' in filenames
2088 # XXX doesn't support '\n' or '\r' in filenames
2080 l = fp.readline()
2089 l = fp.readline()
2081 try:
2090 try:
2082 name, size = l.split('\0', 1)
2091 name, size = l.split('\0', 1)
2083 size = int(size)
2092 size = int(size)
2084 except (ValueError, TypeError):
2093 except (ValueError, TypeError):
2085 raise error.ResponseError(
2094 raise error.ResponseError(
2086 _('Unexpected response from remote server:'), l)
2095 _('Unexpected response from remote server:'), l)
2087 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2096 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2088 # for backwards compat, name was partially encoded
2097 # for backwards compat, name was partially encoded
2089 ofp = self.sopener(store.decodedir(name), 'w')
2098 ofp = self.sopener(store.decodedir(name), 'w')
2090 for chunk in util.filechunkiter(fp, limit=size):
2099 for chunk in util.filechunkiter(fp, limit=size):
2091 ofp.write(chunk)
2100 ofp.write(chunk)
2092 ofp.close()
2101 ofp.close()
2093 elapsed = time.time() - start
2102 elapsed = time.time() - start
2094 if elapsed <= 0:
2103 if elapsed <= 0:
2095 elapsed = 0.001
2104 elapsed = 0.001
2096 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2105 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2097 (util.bytecount(total_bytes), elapsed,
2106 (util.bytecount(total_bytes), elapsed,
2098 util.bytecount(total_bytes / elapsed)))
2107 util.bytecount(total_bytes / elapsed)))
2099 self.invalidate()
2108 self.invalidate()
2100 return len(self.heads()) + 1
2109 return len(self.heads()) + 1
2101
2110
2102 def clone(self, remote, heads=[], stream=False):
2111 def clone(self, remote, heads=[], stream=False):
2103 '''clone remote repository.
2112 '''clone remote repository.
2104
2113
2105 keyword arguments:
2114 keyword arguments:
2106 heads: list of revs to clone (forces use of pull)
2115 heads: list of revs to clone (forces use of pull)
2107 stream: use streaming clone if possible'''
2116 stream: use streaming clone if possible'''
2108
2117
2109 # now, all clients that can request uncompressed clones can
2118 # now, all clients that can request uncompressed clones can
2110 # read repo formats supported by all servers that can serve
2119 # read repo formats supported by all servers that can serve
2111 # them.
2120 # them.
2112
2121
2113 # if revlog format changes, client will have to check version
2122 # if revlog format changes, client will have to check version
2114 # and format flags on "stream" capability, and use
2123 # and format flags on "stream" capability, and use
2115 # uncompressed only if compatible.
2124 # uncompressed only if compatible.
2116
2125
2117 if stream and not heads and remote.capable('stream'):
2126 if stream and not heads and remote.capable('stream'):
2118 return self.stream_in(remote)
2127 return self.stream_in(remote)
2119 return self.pull(remote, heads)
2128 return self.pull(remote, heads)
2120
2129
2121 # used to avoid circular references so destructors work
2130 # used to avoid circular references so destructors work
2122 def aftertrans(files):
2131 def aftertrans(files):
2123 renamefiles = [tuple(t) for t in files]
2132 renamefiles = [tuple(t) for t in files]
2124 def a():
2133 def a():
2125 for src, dest in renamefiles:
2134 for src, dest in renamefiles:
2126 util.rename(src, dest)
2135 util.rename(src, dest)
2127 return a
2136 return a
2128
2137
2129 def instance(ui, path, create):
2138 def instance(ui, path, create):
2130 return localrepository(ui, util.drop_scheme('file', path), create)
2139 return localrepository(ui, util.drop_scheme('file', path), create)
2131
2140
2132 def islocal(path):
2141 def islocal(path):
2133 return True
2142 return True
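
The head-count return convention documented in addchangegroup() above is compact enough to misread, so here is a minimal sketch of just that convention. The helper name and the example values are illustrative only and are not part of localrepo.py:

# Illustrative helper (hypothetical name) mirroring the return values that
# addchangegroup() documents:
#   0            -> nothing changed or no source
#   1 + added    -> more heads than before (2..n)
#   -1 - removed -> fewer heads than before (-2..-n)
#   1            -> number of heads stayed the same
def head_delta_result(changesets, oldheads, newheads):
    if not changesets:
        return 0
    if newheads < oldheads:
        return newheads - oldheads - 1
    return newheads - oldheads + 1

assert head_delta_result(0, 3, 3) == 0    # nothing pulled or pushed
assert head_delta_result(4, 3, 3) == 1    # same number of heads
assert head_delta_result(4, 3, 5) == 3    # two new heads -> 1 + 2
assert head_delta_result(4, 3, 2) == -2   # one head merged away -> -1 - 1

Note that, as the "never return 0 here" comment above states, a non-zero changeset count always yields a non-zero result, so callers can distinguish "nothing happened" from "heads unchanged".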
@@ -1,172 +1,172 b''
1 updating to branch default
1 updating to branch default
2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 pushing to ../a
3 pushing to ../a
4 searching for changes
4 searching for changes
5 abort: push creates new remote heads!
5 abort: push creates new remote heads on branch 'default'!
6 (did you forget to merge? use push -f to force)
6 (you should pull and merge or use push -f to force)
7 pulling from ../a
7 pulling from ../a
8 searching for changes
8 searching for changes
9 adding changesets
9 adding changesets
10 adding manifests
10 adding manifests
11 adding file changes
11 adding file changes
12 added 1 changesets with 1 changes to 1 files (+1 heads)
12 added 1 changesets with 1 changes to 1 files (+1 heads)
13 (run 'hg heads' to see heads, 'hg merge' to merge)
13 (run 'hg heads' to see heads, 'hg merge' to merge)
14 pushing to ../a
14 pushing to ../a
15 searching for changes
15 searching for changes
16 abort: push creates new remote heads!
16 abort: push creates new remote heads on branch 'default'!
17 (did you forget to merge? use push -f to force)
17 (did you forget to merge? use push -f to force)
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 (branch merge, don't forget to commit)
19 (branch merge, don't forget to commit)
20 pushing to ../a
20 pushing to ../a
21 searching for changes
21 searching for changes
22 adding changesets
22 adding changesets
23 adding manifests
23 adding manifests
24 adding file changes
24 adding file changes
25 added 2 changesets with 1 changes to 1 files
25 added 2 changesets with 1 changes to 1 files
26 adding foo
26 adding foo
27 updating to branch default
27 updating to branch default
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 created new head
30 created new head
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 created new head
32 created new head
33 merging foo
33 merging foo
34 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
34 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
35 (branch merge, don't forget to commit)
35 (branch merge, don't forget to commit)
36 pushing to ../c
36 pushing to ../c
37 searching for changes
37 searching for changes
38 abort: push creates new remote heads!
38 abort: push creates new remote heads on branch 'default'!
39 (did you forget to merge? use push -f to force)
39 (did you forget to merge? use push -f to force)
40 1
40 1
41 pushing to ../c
41 pushing to ../c
42 searching for changes
42 searching for changes
43 no changes found
43 no changes found
44 0
44 0
45 pushing to ../c
45 pushing to ../c
46 searching for changes
46 searching for changes
47 abort: push creates new remote heads!
47 abort: push creates new remote heads on branch 'default'!
48 (did you forget to merge? use push -f to force)
48 (you should pull and merge or use push -f to force)
49 1
49 1
50 pushing to ../c
50 pushing to ../c
51 searching for changes
51 searching for changes
52 abort: push creates new remote heads!
52 abort: push creates new remote heads on branch 'default'!
53 (did you forget to merge? use push -f to force)
53 (did you forget to merge? use push -f to force)
54 1
54 1
55 pushing to ../c
55 pushing to ../c
56 searching for changes
56 searching for changes
57 adding changesets
57 adding changesets
58 adding manifests
58 adding manifests
59 adding file changes
59 adding file changes
60 added 2 changesets with 2 changes to 1 files (+2 heads)
60 added 2 changesets with 2 changes to 1 files (+2 heads)
61 0
61 0
62 pushing to ../c
62 pushing to ../c
63 searching for changes
63 searching for changes
64 adding changesets
64 adding changesets
65 adding manifests
65 adding manifests
66 adding file changes
66 adding file changes
67 added 1 changesets with 1 changes to 1 files (-1 heads)
67 added 1 changesets with 1 changes to 1 files (-1 heads)
68 0
68 0
69 pushing to ../e
69 pushing to ../e
70 searching for changes
70 searching for changes
71 adding changesets
71 adding changesets
72 adding manifests
72 adding manifests
73 adding file changes
73 adding file changes
74 added 1 changesets with 1 changes to 1 files
74 added 1 changesets with 1 changes to 1 files
75 0
75 0
76 pushing to ../e
76 pushing to ../e
77 searching for changes
77 searching for changes
78 adding changesets
78 adding changesets
79 adding manifests
79 adding manifests
80 adding file changes
80 adding file changes
81 added 1 changesets with 1 changes to 1 files
81 added 1 changesets with 1 changes to 1 files
82 0
82 0
83 % issue 736
83 % issue 736
84 % push on existing branch and new branch
84 % push on existing branch and new branch
85 pushing to ../f
85 pushing to ../f
86 searching for changes
86 searching for changes
87 abort: push creates new remote branches: c!
87 abort: push creates new remote branches: c!
88 (use 'hg push -f' to force)
88 (use 'hg push -f' to force)
89 1
89 1
90 pushing to ../f
90 pushing to ../f
91 searching for changes
91 searching for changes
92 abort: push creates new remote branches: c!
92 abort: push creates new remote branches: c!
93 (use 'hg push -f' to force)
93 (use 'hg push -f' to force)
94 1
94 1
95 % multiple new branches
95 % multiple new branches
96 pushing to ../f
96 pushing to ../f
97 searching for changes
97 searching for changes
98 abort: push creates new remote branches: c, d!
98 abort: push creates new remote branches: c, d!
99 (use 'hg push -f' to force)
99 (use 'hg push -f' to force)
100 1
100 1
101 pushing to ../f
101 pushing to ../f
102 searching for changes
102 searching for changes
103 abort: push creates new remote branches: d!
103 abort: push creates new remote branches: d!
104 (use 'hg push -f' to force)
104 (use 'hg push -f' to force)
105 1
105 1
106 % fail on multiple head push
106 % fail on multiple head push
107 pushing to ../f
107 pushing to ../f
108 searching for changes
108 searching for changes
109 abort: push creates new remote heads!
109 abort: push creates new remote heads on branch 'a'!
110 (did you forget to merge? use push -f to force)
110 (you should pull and merge or use push -f to force)
111 1
111 1
112 % push replacement head on existing branches
112 % push replacement head on existing branches
113 pushing to ../f
113 pushing to ../f
114 searching for changes
114 searching for changes
115 adding changesets
115 adding changesets
116 adding manifests
116 adding manifests
117 adding file changes
117 adding file changes
118 added 2 changesets with 2 changes to 1 files
118 added 2 changesets with 2 changes to 1 files
119 0
119 0
120 % merge of branch a to other branch b followed by unrelated push on branch a
120 % merge of branch a to other branch b followed by unrelated push on branch a
121 pushing to ../f
121 pushing to ../f
122 searching for changes
122 searching for changes
123 adding changesets
123 adding changesets
124 adding manifests
124 adding manifests
125 adding file changes
125 adding file changes
126 added 1 changesets with 1 changes to 1 files (-1 heads)
126 added 1 changesets with 1 changes to 1 files (-1 heads)
127 0
127 0
128 pushing to ../f
128 pushing to ../f
129 searching for changes
129 searching for changes
130 adding changesets
130 adding changesets
131 adding manifests
131 adding manifests
132 adding file changes
132 adding file changes
133 added 1 changesets with 1 changes to 1 files (+1 heads)
133 added 1 changesets with 1 changes to 1 files (+1 heads)
134 0
134 0
135 % cheating the counting algorithm
135 % cheating the counting algorithm
136 pushing to ../f
136 pushing to ../f
137 searching for changes
137 searching for changes
138 adding changesets
138 adding changesets
139 adding manifests
139 adding manifests
140 adding file changes
140 adding file changes
141 added 2 changesets with 2 changes to 1 files
141 added 2 changesets with 2 changes to 1 files
142 0
142 0
143 % checking prepush logic does not allow silently pushing multiple new heads
143 % checking prepush logic does not allow silently pushing multiple new heads
144 adding init
144 adding init
145 adding a
145 adding a
146 updating to branch default
146 updating to branch default
147 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
148 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
149 adding b
149 adding b
150 created new head
150 created new head
151 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
151 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
152 adding c
152 adding c
153 created new head
153 created new head
154 pushing to h
154 pushing to h
155 searching for changes
155 searching for changes
156 abort: push creates new remote heads!
156 abort: push creates new remote heads on branch 'default'!
157 (did you forget to merge? use push -f to force)
157 (you should pull and merge or use push -f to force)
158
158
159 % check prepush logic with merged branches
159 % check prepush logic with merged branches
160 marked working directory as branch a
160 marked working directory as branch a
161 adding foo
161 adding foo
162 updating to branch a
162 updating to branch a
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 marked working directory as branch b
164 marked working directory as branch b
165 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
166 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
166 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
167 (branch merge, don't forget to commit)
167 (branch merge, don't forget to commit)
168 pushing to j
168 pushing to j
169 searching for changes
169 searching for changes
170 abort: push creates new remote heads!
170 abort: push creates new remote heads on branch 'a'!
171 (did you forget to merge? use push -f to force)
171 (you should pull and merge or use push -f to force)
172
172
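
The updated expected output above is the point of this change: the abort now names the offending branch, and the hint suggests pulling and merging first. The following is a hedged sketch of how those message/hint pairs line up with the strings in the test; it is not the actual prepush implementation, and the function and parameter names are hypothetical:

# Sketch only: pairs up the abort text and hint text that the updated test
# output expects ("push creates new remote heads on branch '<name>'!").
def prepush_messages(new_branches=None, branch=None):
    if new_branches:
        return ("push creates new remote branches: %s!"
                % ', '.join(sorted(new_branches)),
                "use 'hg push -f' to force")
    if branch is not None:
        return ("push creates new remote heads on branch '%s'!" % branch,
                "you should pull and merge or use push -f to force")
    return None

# e.g. prepush_messages(branch='default') yields the pair seen above:
#   ("push creates new remote heads on branch 'default'!",
#    "you should pull and merge or use push -f to force")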
@@ -1,257 +1,257 b''
1 % first revision, no sub
1 % first revision, no sub
2 adding a
2 adding a
3 % add first sub
3 % add first sub
4 adding a
4 adding a
5 committing subrepository s
5 committing subrepository s
6 % add sub sub
6 % add sub sub
7 committing subrepository s
7 committing subrepository s
8 committing subrepository ss
8 committing subrepository ss
9 % bump sub rev
9 % bump sub rev
10 committing subrepository s
10 committing subrepository s
11 % leave sub dirty
11 % leave sub dirty
12 committing subrepository s
12 committing subrepository s
13 changeset: 3:1c833a7a9e3a
13 changeset: 3:1c833a7a9e3a
14 tag: tip
14 tag: tip
15 user: test
15 user: test
16 date: Thu Jan 01 00:00:00 1970 +0000
16 date: Thu Jan 01 00:00:00 1970 +0000
17 summary: 4
17 summary: 4
18
18
19 % check caching
19 % check caching
20 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
20 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
21 % restore
21 % restore
22 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 path s
23 path s
24 source s
24 source s
25 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
25 revision 1c833a7a9e3a4445c711aaf0f012379cd0d4034e
26 % new branch for merge tests
26 % new branch for merge tests
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 adding t/t
28 adding t/t
29 % 5
29 % 5
30 committing subrepository t
30 committing subrepository t
31 created new head
31 created new head
32 % 6
32 % 6
33 committing subrepository t
33 committing subrepository t
34 path s
34 path s
35 source s
35 source s
36 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
36 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
37 path t
37 path t
38 source t
38 source t
39 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
39 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
40 % 7
40 % 7
41 committing subrepository t
41 committing subrepository t
42 % 8
42 % 8
43 % merge tests
43 % merge tests
44 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 (branch merge, don't forget to commit)
46 (branch merge, don't forget to commit)
47 path s
47 path s
48 source s
48 source s
49 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
49 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
50 path t
50 path t
51 source t
51 source t
52 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
52 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
53 created new head
53 created new head
54 searching for copies back to rev 2
54 searching for copies back to rev 2
55 resolving manifests
55 resolving manifests
56 overwrite None partial False
56 overwrite None partial False
57 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
57 ancestor 1f14a2e2d3ec local f0d2028bf86d+ remote 1831e14459c4
58 .hgsubstate: versions differ -> m
58 .hgsubstate: versions differ -> m
59 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
59 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
60 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
60 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
61 getting subrepo t
61 getting subrepo t
62 resolving manifests
62 resolving manifests
63 overwrite True partial False
63 overwrite True partial False
64 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
64 ancestor 60ca1237c194+ local 60ca1237c194+ remote 6747d179aa9a
65 t: remote is newer -> g
65 t: remote is newer -> g
66 getting t
66 getting t
67 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
67 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 (branch merge, don't forget to commit)
68 (branch merge, don't forget to commit)
69 path s
69 path s
70 source s
70 source s
71 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
71 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
72 path t
72 path t
73 source t
73 source t
74 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
74 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
75 committing subrepository t
75 committing subrepository t
76 searching for copies back to rev 2
76 searching for copies back to rev 2
77 resolving manifests
77 resolving manifests
78 overwrite None partial False
78 overwrite None partial False
79 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
79 ancestor 1831e14459c4 local e45c8b14af55+ remote f94576341bcf
80 .hgsubstate: versions differ -> m
80 .hgsubstate: versions differ -> m
81 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
81 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
82 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
82 subrepo t: both sides changed, merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
83 merging subrepo t
83 merging subrepo t
84 searching for copies back to rev 2
84 searching for copies back to rev 2
85 resolving manifests
85 resolving manifests
86 overwrite None partial False
86 overwrite None partial False
87 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
87 ancestor 6747d179aa9a local 20a0db6fbf6c+ remote 7af322bc1198
88 t: versions differ -> m
88 t: versions differ -> m
89 preserving t for resolve of t
89 preserving t for resolve of t
90 picked tool 'internal:merge' for t (binary False symlink False)
90 picked tool 'internal:merge' for t (binary False symlink False)
91 merging t
91 merging t
92 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
92 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
93 warning: conflicts during merge.
93 warning: conflicts during merge.
94 merging t failed!
94 merging t failed!
95 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
95 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
96 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
96 use 'hg resolve' to retry unresolved file merges or 'hg update -C' to abandon
97 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 (branch merge, don't forget to commit)
98 (branch merge, don't forget to commit)
99 % should conflict
99 % should conflict
100 <<<<<<< local
100 <<<<<<< local
101 conflict
101 conflict
102 =======
102 =======
103 t3
103 t3
104 >>>>>>> other
104 >>>>>>> other
105 % clone
105 % clone
106 updating to branch default
106 updating to branch default
107 pulling subrepo s
107 pulling subrepo s
108 requesting all changes
108 requesting all changes
109 adding changesets
109 adding changesets
110 adding manifests
110 adding manifests
111 adding file changes
111 adding file changes
112 added 4 changesets with 5 changes to 3 files
112 added 4 changesets with 5 changes to 3 files
113 pulling subrepo ss
113 pulling subrepo ss
114 requesting all changes
114 requesting all changes
115 adding changesets
115 adding changesets
116 adding manifests
116 adding manifests
117 adding file changes
117 adding file changes
118 added 1 changesets with 1 changes to 1 files
118 added 1 changesets with 1 changes to 1 files
119 pulling subrepo t
119 pulling subrepo t
120 requesting all changes
120 requesting all changes
121 adding changesets
121 adding changesets
122 adding manifests
122 adding manifests
123 adding file changes
123 adding file changes
124 added 4 changesets with 4 changes to 1 files (+1 heads)
124 added 4 changesets with 4 changes to 1 files (+1 heads)
125 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
125 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
126 path s
126 path s
127 source s
127 source s
128 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
128 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
129 path t
129 path t
130 source t
130 source t
131 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
131 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
132 % push
132 % push
133 committing subrepository t
133 committing subrepository t
134 pushing ...sub/t
134 pushing ...sub/t
135 pushing ...subrepo ss
135 pushing ...subrepo ss
136 searching for changes
136 searching for changes
137 no changes found
137 no changes found
138 pushing ...subrepo s
138 pushing ...subrepo s
139 searching for changes
139 searching for changes
140 no changes found
140 no changes found
141 pushing ...subrepo t
141 pushing ...subrepo t
142 searching for changes
142 searching for changes
143 adding changesets
143 adding changesets
144 adding manifests
144 adding manifests
145 adding file changes
145 adding file changes
146 added 1 changesets with 1 changes to 1 files
146 added 1 changesets with 1 changes to 1 files
147 searching for changes
147 searching for changes
148 adding changesets
148 adding changesets
149 adding manifests
149 adding manifests
150 adding file changes
150 adding file changes
151 added 1 changesets with 1 changes to 1 files
151 added 1 changesets with 1 changes to 1 files
152 % push -f
152 % push -f
153 committing subrepository s
153 committing subrepository s
154 abort: push creates new remote heads!
154 abort: push creates new remote heads on branch 'default'!
155 pushing ...sub/t
155 pushing ...sub/t
156 pushing ...subrepo ss
156 pushing ...subrepo ss
157 searching for changes
157 searching for changes
158 no changes found
158 no changes found
159 pushing ...subrepo s
159 pushing ...subrepo s
160 searching for changes
160 searching for changes
161 (did you forget to merge? use push -f to force)
161 (did you forget to merge? use push -f to force)
162 pushing ...subrepo t
162 pushing ...subrepo t
163 searching for changes
163 searching for changes
164 no changes found
164 no changes found
165 searching for changes
165 searching for changes
166 adding changesets
166 adding changesets
167 adding manifests
167 adding manifests
168 adding file changes
168 adding file changes
169 added 1 changesets with 1 changes to 1 files
169 added 1 changesets with 1 changes to 1 files
170 pushing ...sub/t
170 pushing ...sub/t
171 pushing ...subrepo ss
171 pushing ...subrepo ss
172 searching for changes
172 searching for changes
173 no changes found
173 no changes found
174 pushing ...subrepo s
174 pushing ...subrepo s
175 searching for changes
175 searching for changes
176 adding changesets
176 adding changesets
177 adding manifests
177 adding manifests
178 adding file changes
178 adding file changes
179 added 1 changesets with 1 changes to 1 files (+1 heads)
179 added 1 changesets with 1 changes to 1 files (+1 heads)
180 pushing ...subrepo t
180 pushing ...subrepo t
181 searching for changes
181 searching for changes
182 no changes found
182 no changes found
183 searching for changes
183 searching for changes
184 no changes found
184 no changes found
185 % update
185 % update
186 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
186 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
187 committing subrepository t
187 committing subrepository t
188 % pull
188 % pull
189 pulling ...sub/t
189 pulling ...sub/t
190 searching for changes
190 searching for changes
191 adding changesets
191 adding changesets
192 adding manifests
192 adding manifests
193 adding file changes
193 adding file changes
194 added 1 changesets with 1 changes to 1 files
194 added 1 changesets with 1 changes to 1 files
195 (run 'hg update' to get a working copy)
195 (run 'hg update' to get a working copy)
196 pulling subrepo t
196 pulling subrepo t
197 searching for changes
197 searching for changes
198 adding changesets
198 adding changesets
199 adding manifests
199 adding manifests
200 adding file changes
200 adding file changes
201 added 1 changesets with 1 changes to 1 files
201 added 1 changesets with 1 changes to 1 files
202 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 blah
203 blah
204 % bogus subrepo path aborts
204 % bogus subrepo path aborts
205 abort: missing ] in subrepo source
205 abort: missing ] in subrepo source
206 % issue 1986
206 % issue 1986
207 adding a
207 adding a
208 marked working directory as branch br
208 marked working directory as branch br
209 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
210 adding b
210 adding b
211 created new head
211 created new head
212 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
212 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
213 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
213 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
214 (branch merge, don't forget to commit)
214 (branch merge, don't forget to commit)
215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 adding c
216 adding c
217 created new head
217 created new head
218 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
218 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
219 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
219 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 (branch merge, don't forget to commit)
220 (branch merge, don't forget to commit)
221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
222 adding .hgsub
222 adding .hgsub
223 committing subrepository s
223 committing subrepository s
224 marked working directory as branch br
224 marked working directory as branch br
225 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
225 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 adding b
226 adding b
227 committing subrepository s
227 committing subrepository s
228 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
228 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
229 adding c
229 adding c
230 created new head
230 created new head
231 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
231 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
232 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
232 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 (branch merge, don't forget to commit)
233 (branch merge, don't forget to commit)
234 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
234 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
235 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
235 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
236 adding d
236 adding d
237 committing subrepository s
237 committing subrepository s
238 created new head
238 created new head
239 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
239 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
240 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
240 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
241 adding e
241 adding e
242 committing subrepository s
242 committing subrepository s
243 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
244 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
244 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
245 (branch merge, don't forget to commit)
245 (branch merge, don't forget to commit)
246 % test repository cloning
246 % test repository cloning
247 adding nested_absolute/foo
247 adding nested_absolute/foo
248 adding nested_relative/foo2
248 adding nested_relative/foo2
249 adding main/.hgsub
249 adding main/.hgsub
250 committing subrepository nested_relative
250 committing subrepository nested_relative
251 committing subrepository nested_absolute
251 committing subrepository nested_absolute
252 updating to branch default
252 updating to branch default
253 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
254 [paths]
254 [paths]
255 default = /tmp/mercurial/main/nested_absolute
255 default = /tmp/mercurial/main/nested_absolute
256 [paths]
256 [paths]
257 default = /tmp/mercurial/main/nested_relative
257 default = /tmp/mercurial/main/nested_relative
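
For reference, the subrepo test output above repeatedly commits subrepository state and echoes "path / source / revision" triples. A minimal sketch of the two state files involved, assuming the usual line formats of .hgsub (path = source) and .hgsubstate (revision followed by path); the helper names are illustrative and not Mercurial APIs:

# Sketch: parse .hgsub into {path: source} and .hgsubstate into
# {path: revision}, the data echoed by the test output above.
def read_hgsub(text):
    subs = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        path, source = line.split('=', 1)
        subs[path.strip()] = source.strip()
    return subs

def read_hgsubstate(text):
    state = {}
    for line in text.splitlines():
        if not line.strip():
            continue
        revision, path = line.split(' ', 1)
        state[path.strip()] = revision
    return state

# e.g. read_hgsub("s = s\n") -> {'s': 's'}, and a .hgsubstate line of
# "<40-hex-node> s" maps path 's' to that pinned revision.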