commit: if relevant, tell user their commit message was saved....
Greg Ward
r9935:48b81d9b default
@@ -1,2173 +1,2181 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
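
For orientation, a minimal sketch of driving this tagging API from outside; it assumes the current directory is inside a repository and is illustrative only, not part of this file:

from mercurial import hg, ui as uimod

myui = uimod.ui()
repo = hg.repository(myui, '.')   # assumed: cwd is inside a repository
node = repo.lookup('tip')         # resolve a symbolic name to a binary node
# local=False writes .hgtags and creates a new changeset via _tag()
repo.tag('v1.0', node, 'Added tag v1.0', False,
         'test <test@example.org>', None)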

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
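
Concretely, the pair built above is easiest to see through the public accessors; a sketch reusing the repo object from the earlier example:

from mercurial.node import hex

for name, node in repo.tags().iteritems():
    # tags() caches _findtags(); tagtype() consults _tagtypes:
    # 'global' from .hgtags, 'local' from .hg/localtags, and None
    # for the synthetic 'tip' entry
    print '%-12s %s %s' % (name, hex(node)[:12], repo.tagtype(name))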

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt
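
A sketch of how branchmap() and branchtags() relate, again reusing repo; the printed values are hypothetical:

from mercurial.node import hex

for branch, heads in repo.branchmap().iteritems():
    print branch, [hex(h)[:12] for h in heads]  # all heads, oldest first
print repo.branchtags()  # one head per branch, preferring the last open one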


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
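
The method above tries each namespace in a fixed order; a hypothetical session reusing repo (the tag name and hex prefix are assumed to exist in the repository):

repo.lookup(0)         # integer: changelog revision number
repo.lookup('.')       # first parent of the working directory
repo.lookup('tip')     # current changelog tip
repo.lookup('v1.0')    # exact node match, then tags, then branch names...
repo.lookup('6ba1c3')  # ...and finally an unambiguous hex prefix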

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
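
How the pieces fit together: a filter registered here by name can be referenced from [encode]/[decode] rules, and _filter() above applies it to file data inside wread() and wwrite(). The filter name, hgrc rule, and file below are hypothetical:

def upper(s, cmd, **kwargs):
    # s is the file data; cmd is the hgrc command with the name stripped
    return s.upper()

repo.adddatafilter('upper:', upper)
# with this in the repository's hgrc:
#   [encode]
#   **.txt = upper:
print repo.wread('notes.txt')  # data now passes through upper()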

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
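
A sketch of the journal lifecycle this method manages (illustrative; error handling elided):

lock = repo.lock()
try:
    tr = repo.transaction()      # writes .hg/store/journal, snapshots dirstate
    nested = repo.transaction()  # while tr is running, this is just tr.nest()
    # ... revlog appends are recorded in the journal ...
    tr.close()                   # on success aftertrans() renames journal to undo
finally:
    lock.release()
# an interrupted transaction leaves the journal behind; recover() replays it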

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
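
The conventional acquisition order, as used by rollback() above and commit() below, is wlock before lock; a sketch:

from mercurial.lock import release

wlock = lock = None
try:
    wlock = repo.wlock()  # guards dirstate and other non-store .hg files
    lock = repo.lock()    # guards .hg/store; its acquirefn invalidates caches
    # ... modify the repository ...
finally:
    release(lock, wlock)  # releases in reverse order and tolerates None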

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
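
The copy metadata recorded above can be read back through a file context; 'bar' is a hypothetical file renamed from 'foo':

fctx = repo['tip']['bar']  # filectx for 'bar' in the tip changeset
print fctx.renamed()       # ('foo', <filenode>) when copy data was recorded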

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
+            edited = (text != cctx._text)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  (Save in text mode in case a
            # Windows user wants to edit it with Notepad. Normalize
            # trailing whitespace so the file always looks the same --
            # makes testing easier.)
            msgfile = self.opener('last-message.txt', 'w')
            msgfile.write(cctx._text.rstrip() + '\n')
            msgfile.close()

-            ret = self.commitctx(cctx, True)
+            try:
+                ret = self.commitctx(cctx, True)
+            except:
+                if edited:
+                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
+                    self.ui.write(
+                        _('note: commit message saved in %s\n') % msgfn)
+                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
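
The try/except added above (the lines marked with +) is the point of this changeset: if commitctx() fails after the user has already typed a message into the editor, for example because a pretxncommit hook rejects the commit, the message survives the rolled-back transaction and the user is told where it is. A hypothetical session:

$ hg commit                  # editor opens; message written; hook then rejects
abort: pretxncommit hook exited with status 1
note: commit message saved in .hg/last-message.txt
$ hg commit -l .hg/last-message.txt   # retry, reusing the saved message

The note is only printed when edited is true, that is, when the editor actually changed the text; a message supplied on the command line can simply be supplied again.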
861
869
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

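    # Illustrative sketch (not part of the original source): the hook names
    # above are the real ones fired by commitctx, in this order; the shell
    # commands in the hgrc below are placeholders.
    #
    #   [hooks]
    #   precommit.check = ...     # before anything is written
    #   pretxncommit.audit = ...  # inside the open transaction; the
    #                             # 'pending' hook argument lets external hg
    #                             # processes see the not-yet-final node
    #   commit.notify = ...       # after the transaction has closed
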
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        for l in r:
            l.sort()
        return r

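    # Illustrative sketch (assumed caller, not part of the original source):
    # status() returns seven lists, in this order.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(node1='.', node2=None, clean=True)
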
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

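    # Illustrative sketch (assumed caller, not part of the original source):
    #
    #   rejected = repo.add(['tracked.txt', 'missing.txt'])
    #   # names that could not be added (e.g. nonexistent paths) come back
    #   # in `rejected`; the rest are now in dirstate state 'a'
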
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

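    # Illustrative sketch (assumed caller, not part of the original source):
    #
    #   heads = repo.branchheads('default', closed=False)
    #   # newest-first list of head nodes on the 'default' branch,
    #   # excluding heads marked as closed
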
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

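    # Illustrative sketch (not part of the original source): for a pair
    # (top, bottom), the loop above samples the first-parent chain at
    # exponentially growing distances from top, i.e. after 1, 2, 4, 8, ...
    # steps. For a linear chain node0..node9 (node9 newest):
    #
    #   repo.between([(node9, node0)])  ->  [[node8, node7, node5, node1]]
    #
    # This sparse skeleton is what lets findcommonincoming() below
    # binary-search the boundary between known and unknown history.
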
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

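    # Illustrative sketch (assumed caller, not part of the original source);
    # this mirrors how pull() below consumes the return value:
    #
    #   common, fetch, rheads = repo.findcommonincoming(remote)
    #   # common: nodes known to both sides (the frontier, as a list)
    #   # fetch:  roots of the missing subsets, e.g. [nullid] for everything
    #   # rheads: the remote heads
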
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads.add(p1)
                    if p2 in heads:
                        updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

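    # Note on the asymmetric return type above (observation, not original
    # source): findoutgoing() returns a bare list when heads is None, but a
    # (subset, updated_heads) tuple when heads is given. Callers such as
    # prepush() always pass heads and unpack two values:
    #
    #   update, updated_heads = repo.findoutgoing(remote, common, remote_heads)
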
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

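    # Illustrative sketch (assumed caller, not part of the original source;
    # mercurial.hg.repository is assumed here for opening the peer):
    #
    #   from mercurial import hg
    #   other = hg.repository(ui, 'http://hg.example.com/repo')
    #   repo.pull(other)             # everything missing
    #   repo.pull(other, heads=[h])  # only ancestors of h
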
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelb[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelb = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelb:
                            continue
                        if not checkbranch(lheads, rheads, updatelb):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

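    # Illustrative sketch (not part of the original source): both push paths
    # below consume prepush()'s return value the same way.
    #
    #   ret = self.prepush(remote, force, revs)
    #   if ret[0] is None:
    #       status = ret[1]  # 1: nothing to push, 0: push was vetoed
    #   else:
    #       cg, remote_heads = ret  # stream the changegroup to the remote
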
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
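
        # Illustrative sketch (not part of the original source): if the
        # recipient is known to have some node in a revlog, it necessarily
        # has all of that node's ancestors, so prune_parents() can drop the
        # whole ancestry from a missing-set at once. `somenode` and the
        # 'foo' key below are hypothetical.
        #
        #   hasset = set([somenode])
        #   prune_parents(filerevlog, hasset, msng_filenode_set['foo'])
        #   # 'foo' no longer lists somenode or any of its ancestors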
1721 # This is a function generating function used to set up an environment
1729 # This is a function generating function used to set up an environment
1722 # for the inner function to execute in.
1730 # for the inner function to execute in.
1723 def manifest_and_file_collector(changedfileset):
1731 def manifest_and_file_collector(changedfileset):
1724 # This is an information gathering function that gathers
1732 # This is an information gathering function that gathers
1725 # information from each changeset node that goes out as part of
1733 # information from each changeset node that goes out as part of
1726 # the changegroup. The information gathered is a list of which
1734 # the changegroup. The information gathered is a list of which
1727 # manifest nodes are potentially required (the recipient may
1735 # manifest nodes are potentially required (the recipient may
1728 # already have them) and total list of all files which were
1736 # already have them) and total list of all files which were
1729 # changed in any changeset in the changegroup.
1737 # changed in any changeset in the changegroup.
1730 #
1738 #
1731 # We also remember the first changenode we saw any manifest
1739 # We also remember the first changenode we saw any manifest
1732 # referenced by so we can later determine which changenode 'owns'
1740 # referenced by so we can later determine which changenode 'owns'
1733 # the manifest.
1741 # the manifest.
1734 def collect_manifests_and_files(clnode):
1742 def collect_manifests_and_files(clnode):
1735 c = cl.read(clnode)
1743 c = cl.read(clnode)
1736 for f in c[3]:
1744 for f in c[3]:
1737 # This is to make sure we only have one instance of each
1745 # This is to make sure we only have one instance of each
1738 # filename string for each filename.
1746 # filename string for each filename.
1739 changedfileset.setdefault(f, f)
1747 changedfileset.setdefault(f, f)
1740 msng_mnfst_set.setdefault(c[0], clnode)
1748 msng_mnfst_set.setdefault(c[0], clnode)
1741 return collect_manifests_and_files
1749 return collect_manifests_and_files
1742
1750
1743 # Figure out which manifest nodes (of the ones we think might be part
1751 # Figure out which manifest nodes (of the ones we think might be part
1744 # of the changegroup) the recipient must know about and remove them
1752 # of the changegroup) the recipient must know about and remove them
1745 # from the changegroup.
1753 # from the changegroup.
1746 def prune_manifests():
1754 def prune_manifests():
1747 has_mnfst_set = set()
1755 has_mnfst_set = set()
1748 for n in msng_mnfst_set:
1756 for n in msng_mnfst_set:
1749 # If a 'missing' manifest thinks it belongs to a changenode
1757 # If a 'missing' manifest thinks it belongs to a changenode
1750 # the recipient is assumed to have, obviously the recipient
1758 # the recipient is assumed to have, obviously the recipient
1751 # must have that manifest.
1759 # must have that manifest.
1752 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1760 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1753 if linknode in has_cl_set:
1761 if linknode in has_cl_set:
1754 has_mnfst_set.add(n)
1762 has_mnfst_set.add(n)
1755 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1763 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1756
1764
1757 # Use the information collected in collect_manifests_and_files to say
1765 # Use the information collected in collect_manifests_and_files to say
1758 # which changenode any manifestnode belongs to.
1766 # which changenode any manifestnode belongs to.
1759 def lookup_manifest_link(mnfstnode):
1767 def lookup_manifest_link(mnfstnode):
1760 return msng_mnfst_set[mnfstnode]
1768 return msng_mnfst_set[mnfstnode]
1761
1769
1762 # A function generating function that sets up the initial environment
1770 # A function generating function that sets up the initial environment
1763 # the inner function.
1771 # the inner function.
1764 def filenode_collector(changedfiles):
1772 def filenode_collector(changedfiles):
1765 next_rev = [0]
1773 next_rev = [0]
1766 # This gathers information from each manifestnode included in the
1774 # This gathers information from each manifestnode included in the
1767 # changegroup about which filenodes the manifest node references
1775 # changegroup about which filenodes the manifest node references
1768 # so we can include those in the changegroup too.
1776 # so we can include those in the changegroup too.
1769 #
1777 #
1770 # It also remembers which changenode each filenode belongs to. It
1778 # It also remembers which changenode each filenode belongs to. It
1771 # does this by assuming the a filenode belongs to the changenode
1779 # does this by assuming the a filenode belongs to the changenode
1772 # the first manifest that references it belongs to.
1780 # the first manifest that references it belongs to.
1773 def collect_msng_filenodes(mnfstnode):
1781 def collect_msng_filenodes(mnfstnode):
1774 r = mnfst.rev(mnfstnode)
1782 r = mnfst.rev(mnfstnode)
1775 if r == next_rev[0]:
1783 if r == next_rev[0]:
1776 # If the last rev we looked at was the one just previous,
1784 # If the last rev we looked at was the one just previous,
1777 # we only need to see a diff.
1785 # we only need to see a diff.
1778 deltamf = mnfst.readdelta(mnfstnode)
1786 deltamf = mnfst.readdelta(mnfstnode)
1779 # For each line in the delta
1787 # For each line in the delta
1780 for f, fnode in deltamf.iteritems():
1788 for f, fnode in deltamf.iteritems():
1781 f = changedfiles.get(f, None)
1789 f = changedfiles.get(f, None)
1782 # And if the file is in the list of files we care
1790 # And if the file is in the list of files we care
1783 # about.
1791 # about.
1784 if f is not None:
1792 if f is not None:
1785 # Get the changenode this manifest belongs to
1793 # Get the changenode this manifest belongs to
1786 clnode = msng_mnfst_set[mnfstnode]
1794 clnode = msng_mnfst_set[mnfstnode]
1787 # Create the set of filenodes for the file if
1795 # Create the set of filenodes for the file if
1788 # there isn't one already.
1796 # there isn't one already.
1789 ndset = msng_filenode_set.setdefault(f, {})
1797 ndset = msng_filenode_set.setdefault(f, {})
1790 # And set the filenode's changelog node to the
1798 # And set the filenode's changelog node to the
1791 # manifest's if it hasn't been set already.
1799 # manifest's if it hasn't been set already.
1792 ndset.setdefault(fnode, clnode)
1800 ndset.setdefault(fnode, clnode)
1793 else:
1801 else:
1794 # Otherwise we need a full manifest.
1802 # Otherwise we need a full manifest.
1795 m = mnfst.read(mnfstnode)
1803 m = mnfst.read(mnfstnode)
1796 # For every file we care about.
1804 # For every file we care about.
1797 for f in changedfiles:
1805 for f in changedfiles:
1798 fnode = m.get(f, None)
1806 fnode = m.get(f, None)
1799 # If it's in the manifest
1807 # If it's in the manifest
1800 if fnode is not None:
1808 if fnode is not None:
1801 # See comments above.
1809 # See comments above.
1802 clnode = msng_mnfst_set[mnfstnode]
1810 clnode = msng_mnfst_set[mnfstnode]
1803 ndset = msng_filenode_set.setdefault(f, {})
1811 ndset = msng_filenode_set.setdefault(f, {})
1804 ndset.setdefault(fnode, clnode)
1812 ndset.setdefault(fnode, clnode)
1805 # Remember the revision we hope to see next.
1813 # Remember the revision we hope to see next.
1806 next_rev[0] = r + 1
1814 next_rev[0] = r + 1
1807 return collect_msng_filenodes
1815 return collect_msng_filenodes
1808
1816
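The fast path above leans on readdelta(): when manifest revisions are visited in increasing order, only the entries that changed since the previous revision need to be examined. A toy model of that idea, with manifests faked as plain dicts keyed by revision (the names and data shapes here are illustrative, not mercurial's API):

    def iter_changed(manifests, r):
        # yield only the entries that differ from the previous revision
        prev = manifests.get(r - 1, {})
        for f, fnode in manifests[r].iteritems():
            if prev.get(f) != fnode:
                yield f, fnode

    mfs = {0: {'a': 'n1'}, 1: {'a': 'n1', 'b': 'n2'}}
    print sorted(iter_changed(mfs, 1))    # [('b', 'n2')]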
1809 # We have a list of filenodes we think we need for a file; let's remove
1817 # We have a list of filenodes we think we need for a file; let's remove
1810 # all those we know the recipient must have.
1818 # all those we know the recipient must have.
1811 def prune_filenodes(f, filerevlog):
1819 def prune_filenodes(f, filerevlog):
1812 msngset = msng_filenode_set[f]
1820 msngset = msng_filenode_set[f]
1813 hasset = set()
1821 hasset = set()
1814 # If a 'missing' filenode thinks it belongs to a changenode we
1822 # If a 'missing' filenode thinks it belongs to a changenode we
1815 # assume the recipient must have, then the recipient must have
1823 # assume the recipient must have, then the recipient must have
1816 # that filenode.
1824 # that filenode.
1817 for n in msngset:
1825 for n in msngset:
1818 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1826 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1819 if clnode in has_cl_set:
1827 if clnode in has_cl_set:
1820 hasset.add(n)
1828 hasset.add(n)
1821 prune_parents(filerevlog, hasset, msngset)
1829 prune_parents(filerevlog, hasset, msngset)
1822
1830
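The pruning step in miniature: given a map of candidate filenodes to their owning changenodes and the set of changesets the recipient is known to have, anything already present on the far side can be dropped. A simplified sketch (the real code additionally discards ancestors via prune_parents):

    def prune(msngset, has_cl_set):
        # msngset maps filenode -> owning changenode
        for n, clnode in msngset.items():
            if clnode in has_cl_set:
                del msngset[n]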
1823 # A function generating function that sets up a context for the
1831 # A function generating function that sets up a context for the
1824 # inner function.
1832 # inner function.
1825 def lookup_filenode_link_func(fname):
1833 def lookup_filenode_link_func(fname):
1826 msngset = msng_filenode_set[fname]
1834 msngset = msng_filenode_set[fname]
1827 # Lookup the changenode the filenode belongs to.
1835 # Lookup the changenode the filenode belongs to.
1828 def lookup_filenode_link(fnode):
1836 def lookup_filenode_link(fnode):
1829 return msngset[fnode]
1837 return msngset[fnode]
1830 return lookup_filenode_link
1838 return lookup_filenode_link
1831
1839
1832 # Add the nodes that were explicitly requested.
1840 # Add the nodes that were explicitly requested.
1833 def add_extra_nodes(name, nodes):
1841 def add_extra_nodes(name, nodes):
1834 if not extranodes or name not in extranodes:
1842 if not extranodes or name not in extranodes:
1835 return
1843 return
1836
1844
1837 for node, linknode in extranodes[name]:
1845 for node, linknode in extranodes[name]:
1838 if node not in nodes:
1846 if node not in nodes:
1839 nodes[node] = linknode
1847 nodes[node] = linknode
1840
1848
1841 # Now that we have all these utility functions to help out and
1849 # Now that we have all these utility functions to help out and
1842 # logically divide up the task, generate the group.
1850 # logically divide up the task, generate the group.
1843 def gengroup():
1851 def gengroup():
1844 # The set of changed files starts empty.
1852 # The set of changed files starts empty.
1845 changedfiles = {}
1853 changedfiles = {}
1846 # Create a changenode group generator that will call our functions
1854 # Create a changenode group generator that will call our functions
1847 # back to lookup the owning changenode and collect information.
1855 # back to lookup the owning changenode and collect information.
1848 group = cl.group(msng_cl_lst, identity,
1856 group = cl.group(msng_cl_lst, identity,
1849 manifest_and_file_collector(changedfiles))
1857 manifest_and_file_collector(changedfiles))
1850 for chnk in group:
1858 for chnk in group:
1851 yield chnk
1859 yield chnk
1852
1860
1853 # The list of manifests has been collected by the generator
1861 # The list of manifests has been collected by the generator
1854 # calling our functions back.
1862 # calling our functions back.
1855 prune_manifests()
1863 prune_manifests()
1856 add_extra_nodes(1, msng_mnfst_set)
1864 add_extra_nodes(1, msng_mnfst_set)
1857 msng_mnfst_lst = msng_mnfst_set.keys()
1865 msng_mnfst_lst = msng_mnfst_set.keys()
1858 # Sort the manifestnodes by revision number.
1866 # Sort the manifestnodes by revision number.
1859 msng_mnfst_lst.sort(key=mnfst.rev)
1867 msng_mnfst_lst.sort(key=mnfst.rev)
1860 # Create a generator for the manifestnodes that calls our lookup
1868 # Create a generator for the manifestnodes that calls our lookup
1861 # and data collection functions back.
1869 # and data collection functions back.
1862 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1870 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1863 filenode_collector(changedfiles))
1871 filenode_collector(changedfiles))
1864 for chnk in group:
1872 for chnk in group:
1865 yield chnk
1873 yield chnk
1866
1874
1867 # These are no longer needed, dereference and toss the memory for
1875 # These are no longer needed, dereference and toss the memory for
1868 # them.
1876 # them.
1869 msng_mnfst_lst = None
1877 msng_mnfst_lst = None
1870 msng_mnfst_set.clear()
1878 msng_mnfst_set.clear()
1871
1879
1872 if extranodes:
1880 if extranodes:
1873 for fname in extranodes:
1881 for fname in extranodes:
1874 if isinstance(fname, int):
1882 if isinstance(fname, int):
1875 continue
1883 continue
1876 msng_filenode_set.setdefault(fname, {})
1884 msng_filenode_set.setdefault(fname, {})
1877 changedfiles[fname] = 1
1885 changedfiles[fname] = 1
1878 # Go through all our files in order sorted by name.
1886 # Go through all our files in order sorted by name.
1879 for fname in sorted(changedfiles):
1887 for fname in sorted(changedfiles):
1880 filerevlog = self.file(fname)
1888 filerevlog = self.file(fname)
1881 if not len(filerevlog):
1889 if not len(filerevlog):
1882 raise util.Abort(_("empty or missing revlog for %s") % fname)
1890 raise util.Abort(_("empty or missing revlog for %s") % fname)
1883 # Toss out the filenodes that the recipient isn't really
1891 # Toss out the filenodes that the recipient isn't really
1884 # missing.
1892 # missing.
1885 if fname in msng_filenode_set:
1893 if fname in msng_filenode_set:
1886 prune_filenodes(fname, filerevlog)
1894 prune_filenodes(fname, filerevlog)
1887 add_extra_nodes(fname, msng_filenode_set[fname])
1895 add_extra_nodes(fname, msng_filenode_set[fname])
1888 msng_filenode_lst = msng_filenode_set[fname].keys()
1896 msng_filenode_lst = msng_filenode_set[fname].keys()
1889 else:
1897 else:
1890 msng_filenode_lst = []
1898 msng_filenode_lst = []
1891 # If any filenodes are left, generate the group for them,
1899 # If any filenodes are left, generate the group for them,
1892 # otherwise don't bother.
1900 # otherwise don't bother.
1893 if len(msng_filenode_lst) > 0:
1901 if len(msng_filenode_lst) > 0:
1894 yield changegroup.chunkheader(len(fname))
1902 yield changegroup.chunkheader(len(fname))
1895 yield fname
1903 yield fname
1896 # Sort the filenodes by their revision #
1904 # Sort the filenodes by their revision #
1897 msng_filenode_lst.sort(key=filerevlog.rev)
1905 msng_filenode_lst.sort(key=filerevlog.rev)
1898 # Create a group generator and only pass in a changenode
1906 # Create a group generator and only pass in a changenode
1899 # lookup function as we need to collect no information
1907 # lookup function as we need to collect no information
1900 # from filenodes.
1908 # from filenodes.
1901 group = filerevlog.group(msng_filenode_lst,
1909 group = filerevlog.group(msng_filenode_lst,
1902 lookup_filenode_link_func(fname))
1910 lookup_filenode_link_func(fname))
1903 for chnk in group:
1911 for chnk in group:
1904 yield chnk
1912 yield chnk
1905 if fname in msng_filenode_set:
1913 if fname in msng_filenode_set:
1906 # Don't need this anymore, toss it to free memory.
1914 # Don't need this anymore, toss it to free memory.
1907 del msng_filenode_set[fname]
1915 del msng_filenode_set[fname]
1908 # Signal that no more groups are left.
1916 # Signal that no more groups are left.
1909 yield changegroup.closechunk()
1917 yield changegroup.closechunk()
1910
1918
1911 if msng_cl_lst:
1919 if msng_cl_lst:
1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1920 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1913
1921
1914 return util.chunkbuffer(gengroup())
1922 return util.chunkbuffer(gengroup())
1915
1923
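Both changegroup builders end the same way: a generator of raw chunks is handed to util.chunkbuffer(), which adapts it into a file-like object. A minimal sketch of that adapter pattern (illustrative only, not util.chunkbuffer's actual implementation):

    class chunkreader(object):
        """Present a generator of strings as a file with read(n)."""
        def __init__(self, gen):
            self.iter = iter(gen)
            self.buf = ''
        def read(self, n):
            while len(self.buf) < n:
                try:
                    self.buf += self.iter.next()
                except StopIteration:
                    break
            data, self.buf = self.buf[:n], self.buf[n:]
            return data

    cr = chunkreader(['abc', 'defg', 'h'])
    assert cr.read(5) == 'abcde'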
1916 def changegroup(self, basenodes, source):
1924 def changegroup(self, basenodes, source):
1917 # to avoid a race we use changegroupsubset() (issue1320)
1925 # to avoid a race we use changegroupsubset() (issue1320)
1918 return self.changegroupsubset(basenodes, self.heads(), source)
1926 return self.changegroupsubset(basenodes, self.heads(), source)
1919
1927
1920 def _changegroup(self, nodes, source):
1928 def _changegroup(self, nodes, source):
1921 """Compute the changegroup of all nodes that we have that a recipient
1929 """Compute the changegroup of all nodes that we have that a recipient
1922 doesn't. Return a chunkbuffer object whose read() method will return
1930 doesn't. Return a chunkbuffer object whose read() method will return
1923 successive changegroup chunks.
1931 successive changegroup chunks.
1924
1932
1925 This is much easier than the previous function as we can assume that
1933 This is much easier than the previous function as we can assume that
1926 the recipient has any changenode we aren't sending them.
1934 the recipient has any changenode we aren't sending them.
1927
1935
1928 nodes is the set of nodes to send"""
1936 nodes is the set of nodes to send"""
1929
1937
1930 self.hook('preoutgoing', throw=True, source=source)
1938 self.hook('preoutgoing', throw=True, source=source)
1931
1939
1932 cl = self.changelog
1940 cl = self.changelog
1933 revset = set([cl.rev(n) for n in nodes])
1941 revset = set([cl.rev(n) for n in nodes])
1934 self.changegroupinfo(nodes, source)
1942 self.changegroupinfo(nodes, source)
1935
1943
1936 def identity(x):
1944 def identity(x):
1937 return x
1945 return x
1938
1946
1939 def gennodelst(log):
1947 def gennodelst(log):
1940 for r in log:
1948 for r in log:
1941 if log.linkrev(r) in revset:
1949 if log.linkrev(r) in revset:
1942 yield log.node(r)
1950 yield log.node(r)
1943
1951
1944 def changed_file_collector(changedfileset):
1952 def changed_file_collector(changedfileset):
1945 def collect_changed_files(clnode):
1953 def collect_changed_files(clnode):
1946 c = cl.read(clnode)
1954 c = cl.read(clnode)
1947 changedfileset.update(c[3])
1955 changedfileset.update(c[3])
1948 return collect_changed_files
1956 return collect_changed_files
1949
1957
1950 def lookuprevlink_func(revlog):
1958 def lookuprevlink_func(revlog):
1951 def lookuprevlink(n):
1959 def lookuprevlink(n):
1952 return cl.node(revlog.linkrev(revlog.rev(n)))
1960 return cl.node(revlog.linkrev(revlog.rev(n)))
1953 return lookuprevlink
1961 return lookuprevlink
1954
1962
1955 def gengroup():
1963 def gengroup():
1956 '''yield a sequence of changegroup chunks (strings)'''
1964 '''yield a sequence of changegroup chunks (strings)'''
1957 # construct a list of all changed files
1965 # construct a list of all changed files
1958 changedfiles = set()
1966 changedfiles = set()
1959
1967
1960 for chnk in cl.group(nodes, identity,
1968 for chnk in cl.group(nodes, identity,
1961 changed_file_collector(changedfiles)):
1969 changed_file_collector(changedfiles)):
1962 yield chnk
1970 yield chnk
1963
1971
1964 mnfst = self.manifest
1972 mnfst = self.manifest
1965 nodeiter = gennodelst(mnfst)
1973 nodeiter = gennodelst(mnfst)
1966 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1974 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1967 yield chnk
1975 yield chnk
1968
1976
1969 for fname in sorted(changedfiles):
1977 for fname in sorted(changedfiles):
1970 filerevlog = self.file(fname)
1978 filerevlog = self.file(fname)
1971 if not len(filerevlog):
1979 if not len(filerevlog):
1972 raise util.Abort(_("empty or missing revlog for %s") % fname)
1980 raise util.Abort(_("empty or missing revlog for %s") % fname)
1973 nodeiter = gennodelst(filerevlog)
1981 nodeiter = gennodelst(filerevlog)
1974 nodeiter = list(nodeiter)
1982 nodeiter = list(nodeiter)
1975 if nodeiter:
1983 if nodeiter:
1976 yield changegroup.chunkheader(len(fname))
1984 yield changegroup.chunkheader(len(fname))
1977 yield fname
1985 yield fname
1978 lookup = lookuprevlink_func(filerevlog)
1986 lookup = lookuprevlink_func(filerevlog)
1979 for chnk in filerevlog.group(nodeiter, lookup):
1987 for chnk in filerevlog.group(nodeiter, lookup):
1980 yield chnk
1988 yield chnk
1981
1989
1982 yield changegroup.closechunk()
1990 yield changegroup.closechunk()
1983
1991
1984 if nodes:
1992 if nodes:
1985 self.hook('outgoing', node=hex(nodes[0]), source=source)
1993 self.hook('outgoing', node=hex(nodes[0]), source=source)
1986
1994
1987 return util.chunkbuffer(gengroup())
1995 return util.chunkbuffer(gengroup())
1988
1996
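The heart of _changegroup() is gennodelst(): a revlog entry is outgoing exactly when its linkrev falls in the set of changelog revisions being sent. The same filter on toy data, using plain (node, linkrev) pairs instead of a real revlog:

    def outgoing(entries, revset):
        # entries: (node, linkrev) pairs in revlog order
        for node, linkrev in entries:
            if linkrev in revset:
                yield node

    filelog = [('n0', 0), ('n1', 2), ('n2', 5)]
    print list(outgoing(filelog, set([2, 5])))    # ['n1', 'n2']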
1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1997 def addchangegroup(self, source, srctype, url, emptyok=False):
1990 """add changegroup to repo.
1998 """add changegroup to repo.
1991
1999
1992 return values:
2000 return values:
1993 - nothing changed or no source: 0
2001 - nothing changed or no source: 0
1994 - more heads than before: 1+added heads (2..n)
2002 - more heads than before: 1+added heads (2..n)
1995 - fewer heads than before: -1-removed heads (-2..-n)
2003 - fewer heads than before: -1-removed heads (-2..-n)
1996 - number of heads stays the same: 1
2004 - number of heads stays the same: 1
1997 """
2005 """
1998 def csmap(x):
2006 def csmap(x):
1999 self.ui.debug("add changeset %s\n" % short(x))
2007 self.ui.debug("add changeset %s\n" % short(x))
2000 return len(cl)
2008 return len(cl)
2001
2009
2002 def revmap(x):
2010 def revmap(x):
2003 return cl.rev(x)
2011 return cl.rev(x)
2004
2012
2005 if not source:
2013 if not source:
2006 return 0
2014 return 0
2007
2015
2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2016 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2009
2017
2010 changesets = files = revisions = 0
2018 changesets = files = revisions = 0
2011
2019
2012 # write changelog data to temp files so concurrent readers will not see
2020 # write changelog data to temp files so concurrent readers will not see
2013 # inconsistent view
2021 # inconsistent view
2014 cl = self.changelog
2022 cl = self.changelog
2015 cl.delayupdate()
2023 cl.delayupdate()
2016 oldheads = len(cl.heads())
2024 oldheads = len(cl.heads())
2017
2025
2018 tr = self.transaction()
2026 tr = self.transaction()
2019 try:
2027 try:
2020 trp = weakref.proxy(tr)
2028 trp = weakref.proxy(tr)
2021 # pull off the changeset group
2029 # pull off the changeset group
2022 self.ui.status(_("adding changesets\n"))
2030 self.ui.status(_("adding changesets\n"))
2023 clstart = len(cl)
2031 clstart = len(cl)
2024 chunkiter = changegroup.chunkiter(source)
2032 chunkiter = changegroup.chunkiter(source)
2025 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2033 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2026 raise util.Abort(_("received changelog group is empty"))
2034 raise util.Abort(_("received changelog group is empty"))
2027 clend = len(cl)
2035 clend = len(cl)
2028 changesets = clend - clstart
2036 changesets = clend - clstart
2029
2037
2030 # pull off the manifest group
2038 # pull off the manifest group
2031 self.ui.status(_("adding manifests\n"))
2039 self.ui.status(_("adding manifests\n"))
2032 chunkiter = changegroup.chunkiter(source)
2040 chunkiter = changegroup.chunkiter(source)
2033 # no need to check for empty manifest group here:
2041 # no need to check for empty manifest group here:
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2042 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2035 # no new manifest will be created and the manifest group will
2043 # no new manifest will be created and the manifest group will
2036 # be empty during the pull
2044 # be empty during the pull
2037 self.manifest.addgroup(chunkiter, revmap, trp)
2045 self.manifest.addgroup(chunkiter, revmap, trp)
2038
2046
2039 # process the files
2047 # process the files
2040 self.ui.status(_("adding file changes\n"))
2048 self.ui.status(_("adding file changes\n"))
2041 while 1:
2049 while 1:
2042 f = changegroup.getchunk(source)
2050 f = changegroup.getchunk(source)
2043 if not f:
2051 if not f:
2044 break
2052 break
2045 self.ui.debug("adding %s revisions\n" % f)
2053 self.ui.debug("adding %s revisions\n" % f)
2046 fl = self.file(f)
2054 fl = self.file(f)
2047 o = len(fl)
2055 o = len(fl)
2048 chunkiter = changegroup.chunkiter(source)
2056 chunkiter = changegroup.chunkiter(source)
2049 if fl.addgroup(chunkiter, revmap, trp) is None:
2057 if fl.addgroup(chunkiter, revmap, trp) is None:
2050 raise util.Abort(_("received file revlog group is empty"))
2058 raise util.Abort(_("received file revlog group is empty"))
2051 revisions += len(fl) - o
2059 revisions += len(fl) - o
2052 files += 1
2060 files += 1
2053
2061
2054 newheads = len(cl.heads())
2062 newheads = len(cl.heads())
2055 heads = ""
2063 heads = ""
2056 if oldheads and newheads != oldheads:
2064 if oldheads and newheads != oldheads:
2057 heads = _(" (%+d heads)") % (newheads - oldheads)
2065 heads = _(" (%+d heads)") % (newheads - oldheads)
2058
2066
2059 self.ui.status(_("added %d changesets"
2067 self.ui.status(_("added %d changesets"
2060 " with %d changes to %d files%s\n")
2068 " with %d changes to %d files%s\n")
2061 % (changesets, revisions, files, heads))
2069 % (changesets, revisions, files, heads))
2062
2070
2063 if changesets > 0:
2071 if changesets > 0:
2064 p = lambda: cl.writepending() and self.root or ""
2072 p = lambda: cl.writepending() and self.root or ""
2065 self.hook('pretxnchangegroup', throw=True,
2073 self.hook('pretxnchangegroup', throw=True,
2066 node=hex(cl.node(clstart)), source=srctype,
2074 node=hex(cl.node(clstart)), source=srctype,
2067 url=url, pending=p)
2075 url=url, pending=p)
2068
2076
2069 # make changelog see real files again
2077 # make changelog see real files again
2070 cl.finalize(trp)
2078 cl.finalize(trp)
2071
2079
2072 tr.close()
2080 tr.close()
2073 finally:
2081 finally:
2074 del tr
2082 del tr
2075
2083
2076 if changesets > 0:
2084 if changesets > 0:
2077 # forcefully update the on-disk branch cache
2085 # forcefully update the on-disk branch cache
2078 self.ui.debug("updating the branch cache\n")
2086 self.ui.debug("updating the branch cache\n")
2079 self.branchtags()
2087 self.branchtags()
2080 self.hook("changegroup", node=hex(cl.node(clstart)),
2088 self.hook("changegroup", node=hex(cl.node(clstart)),
2081 source=srctype, url=url)
2089 source=srctype, url=url)
2082
2090
2083 for i in xrange(clstart, clend):
2091 for i in xrange(clstart, clend):
2084 self.hook("incoming", node=hex(cl.node(i)),
2092 self.hook("incoming", node=hex(cl.node(i)),
2085 source=srctype, url=url)
2093 source=srctype, url=url)
2086
2094
2087 # never return 0 here:
2095 # never return 0 here:
2088 if newheads < oldheads:
2096 if newheads < oldheads:
2089 return newheads - oldheads - 1
2097 return newheads - oldheads - 1
2090 else:
2098 else:
2091 return newheads - oldheads + 1
2099 return newheads - oldheads + 1
2092
2100
2093
2101
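The return-value encoding in the docstring above is easy to misread, so here is one way a caller might decode it; this helper is a hedged sketch, not code from mercurial itself:

    def describe(ret):
        # decode addchangegroup()'s return value per its docstring
        if ret == 0:
            return 'nothing changed'
        if ret > 1:
            return '%d new head(s)' % (ret - 1)
        if ret < 0:
            return '%d head(s) removed' % (-ret - 1)
        return 'head count unchanged'

    assert describe(3) == '2 new head(s)'
    assert describe(-2) == '1 head(s) removed'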
2094 def stream_in(self, remote):
2102 def stream_in(self, remote):
2095 fp = remote.stream_out()
2103 fp = remote.stream_out()
2096 l = fp.readline()
2104 l = fp.readline()
2097 try:
2105 try:
2098 resp = int(l)
2106 resp = int(l)
2099 except ValueError:
2107 except ValueError:
2100 raise error.ResponseError(
2108 raise error.ResponseError(
2101 _('Unexpected response from remote server:'), l)
2109 _('Unexpected response from remote server:'), l)
2102 if resp == 1:
2110 if resp == 1:
2103 raise util.Abort(_('operation forbidden by server'))
2111 raise util.Abort(_('operation forbidden by server'))
2104 elif resp == 2:
2112 elif resp == 2:
2105 raise util.Abort(_('locking the remote repository failed'))
2113 raise util.Abort(_('locking the remote repository failed'))
2106 elif resp != 0:
2114 elif resp != 0:
2107 raise util.Abort(_('the server sent an unknown error code'))
2115 raise util.Abort(_('the server sent an unknown error code'))
2108 self.ui.status(_('streaming all changes\n'))
2116 self.ui.status(_('streaming all changes\n'))
2109 l = fp.readline()
2117 l = fp.readline()
2110 try:
2118 try:
2111 total_files, total_bytes = map(int, l.split(' ', 1))
2119 total_files, total_bytes = map(int, l.split(' ', 1))
2112 except (ValueError, TypeError):
2120 except (ValueError, TypeError):
2113 raise error.ResponseError(
2121 raise error.ResponseError(
2114 _('Unexpected response from remote server:'), l)
2122 _('Unexpected response from remote server:'), l)
2115 self.ui.status(_('%d files to transfer, %s of data\n') %
2123 self.ui.status(_('%d files to transfer, %s of data\n') %
2116 (total_files, util.bytecount(total_bytes)))
2124 (total_files, util.bytecount(total_bytes)))
2117 start = time.time()
2125 start = time.time()
2118 for i in xrange(total_files):
2126 for i in xrange(total_files):
2119 # XXX doesn't support '\n' or '\r' in filenames
2127 # XXX doesn't support '\n' or '\r' in filenames
2120 l = fp.readline()
2128 l = fp.readline()
2121 try:
2129 try:
2122 name, size = l.split('\0', 1)
2130 name, size = l.split('\0', 1)
2123 size = int(size)
2131 size = int(size)
2124 except (ValueError, TypeError):
2132 except (ValueError, TypeError):
2125 raise error.ResponseError(
2133 raise error.ResponseError(
2126 _('Unexpected response from remote server:'), l)
2134 _('Unexpected response from remote server:'), l)
2127 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2135 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2128 # for backwards compat, name was partially encoded
2136 # for backwards compat, name was partially encoded
2129 ofp = self.sopener(store.decodedir(name), 'w')
2137 ofp = self.sopener(store.decodedir(name), 'w')
2130 for chunk in util.filechunkiter(fp, limit=size):
2138 for chunk in util.filechunkiter(fp, limit=size):
2131 ofp.write(chunk)
2139 ofp.write(chunk)
2132 ofp.close()
2140 ofp.close()
2133 elapsed = time.time() - start
2141 elapsed = time.time() - start
2134 if elapsed <= 0:
2142 if elapsed <= 0:
2135 elapsed = 0.001
2143 elapsed = 0.001
2136 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2144 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2137 (util.bytecount(total_bytes), elapsed,
2145 (util.bytecount(total_bytes), elapsed,
2138 util.bytecount(total_bytes / elapsed)))
2146 util.bytecount(total_bytes / elapsed)))
2139 self.invalidate()
2147 self.invalidate()
2140 return len(self.heads()) + 1
2148 return len(self.heads()) + 1
2141
2149
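stream_in() consumes a simple wire format: a numeric status line, a '<total_files> <total_bytes>' summary line, then for each file a 'name\0size' header line followed by exactly size bytes of data. A self-contained sketch of a parser for that layout (toy code, not mercurial's: it buffers whole files in memory where the real code streams them to disk):

    from StringIO import StringIO

    def parse_stream(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        files = {}
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            files[name] = fp.read(int(size))
        return files

    wire = StringIO('0\n2 9\ndata/a.i\x004\nAAAAdata/b.i\x005\nBBBBB')
    assert parse_stream(wire)['data/b.i'] == 'BBBBB'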
2142 def clone(self, remote, heads=[], stream=False):
2150 def clone(self, remote, heads=[], stream=False):
2143 '''clone remote repository.
2151 '''clone remote repository.
2144
2152
2145 keyword arguments:
2153 keyword arguments:
2146 heads: list of revs to clone (forces use of pull)
2154 heads: list of revs to clone (forces use of pull)
2147 stream: use streaming clone if possible'''
2155 stream: use streaming clone if possible'''
2148
2156
2149 # now, all clients that can request uncompressed clones can
2157 # now, all clients that can request uncompressed clones can
2150 # read repo formats supported by all servers that can serve
2158 # read repo formats supported by all servers that can serve
2151 # them.
2159 # them.
2152
2160
2153 # if revlog format changes, client will have to check version
2161 # if revlog format changes, client will have to check version
2154 # and format flags on "stream" capability, and use
2162 # and format flags on "stream" capability, and use
2155 # uncompressed only if compatible.
2163 # uncompressed only if compatible.
2156
2164
2157 if stream and not heads and remote.capable('stream'):
2165 if stream and not heads and remote.capable('stream'):
2158 return self.stream_in(remote)
2166 return self.stream_in(remote)
2159 return self.pull(remote, heads)
2167 return self.pull(remote, heads)
2160
2168
2161 # used to avoid circular references so destructors work
2169 # used to avoid circular references so destructors work
2162 def aftertrans(files):
2170 def aftertrans(files):
2163 renamefiles = [tuple(t) for t in files]
2171 renamefiles = [tuple(t) for t in files]
2164 def a():
2172 def a():
2165 for src, dest in renamefiles:
2173 for src, dest in renamefiles:
2166 util.rename(src, dest)
2174 util.rename(src, dest)
2167 return a
2175 return a
2168
2176
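aftertrans() is a deliberate pattern: the callback closes over plain tuples only, never over the repository or transaction objects, so registering it cannot create a reference cycle that would keep those objects (and their destructors) from being collected. The shape of the pattern in isolation, with the rename replaced by a print so the sketch runs harmlessly:

    def make_callback(pairs):
        # copy into plain tuples so the closure holds no reference
        # back to the objects that created it
        pairs = [tuple(p) for p in pairs]
        def run():
            for src, dest in pairs:
                print 'rename %s -> %s' % (src, dest)
        return run

    cb = make_callback([['a.tmp', 'a'], ['b.tmp', 'b']])
    cb()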
2169 def instance(ui, path, create):
2177 def instance(ui, path, create):
2170 return localrepository(ui, util.drop_scheme('file', path), create)
2178 return localrepository(ui, util.drop_scheme('file', path), create)
2171
2179
2172 def islocal(path):
2180 def islocal(path):
2173 return True
2181 return True
@@ -1,39 +1,48 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 echo a > a
6 echo a > a
7 hg add a
7 hg add a
8 hg commit -m "test" -d "1000000 0"
8 hg commit -m "test" -d "1000000 0"
9 hg verify
9 hg verify
10 hg parents
10 hg parents
11 hg status
11 hg status
12 hg rollback
12 hg rollback
13 hg verify
13 hg verify
14 hg parents
14 hg parents
15 hg status
15 hg status
16
16
17 echo % Test issue 902
17 echo % Test issue 902
18 hg commit -m "test2"
18 hg commit -m "test2"
19 hg branch test
19 hg branch test
20 hg rollback
20 hg rollback
21 hg branch
21 hg branch
22
22
23 echo '% Test issue 1635 (commit message saved)'
23 echo '% Test issue 1635 (commit message saved)'
24 echo '.hg/last-message.txt:'
24 echo '.hg/last-message.txt:'
25 cat .hg/last-message.txt
25 cat .hg/last-message.txt
26
26
27 echo % Test rollback of hg before issue 902 was fixed
27 echo % Test rollback of hg before issue 902 was fixed
28 hg commit -m "test3"
28 hg commit -m "test3"
29 hg branch test
29 hg branch test
30 rm .hg/undo.branch
30 rm .hg/undo.branch
31 hg rollback
31 hg rollback
32 hg branch
32 hg branch
33
33
34 echo '% rollback by pretxncommit saves commit message (issue 1635)'
34 echo '% rollback by pretxncommit saves commit message (issue 1635)'
35 echo a >> a
35 echo a >> a
36 hg --config hooks.pretxncommit=/bin/false commit -m"precious commit message"
36 hg --config hooks.pretxncommit=/bin/false commit -m"precious commit message"
37
38 echo '.hg/last-message.txt:'
37 echo '.hg/last-message.txt:'
39 cat .hg/last-message.txt
38 cat .hg/last-message.txt
39
40 echo '% same thing, but run $EDITOR'
41 cat > $HGTMP/editor <<'__EOF__'
42 #!/bin/sh
43 echo "another precious commit message" > "$1"
44 __EOF__
45 chmod +x $HGTMP/editor
46 HGEDITOR=$HGTMP/editor hg --config hooks.pretxncommit=/bin/false commit
47 echo '.hg/last-message.txt:'
48 cat .hg/last-message.txt
@@ -1,36 +1,43 b''
1 checking changesets
1 checking changesets
2 checking manifests
2 checking manifests
3 crosschecking files in changesets and manifests
3 crosschecking files in changesets and manifests
4 checking files
4 checking files
5 1 files, 1 changesets, 1 total revisions
5 1 files, 1 changesets, 1 total revisions
6 changeset: 0:0acdaf898367
6 changeset: 0:0acdaf898367
7 tag: tip
7 tag: tip
8 user: test
8 user: test
9 date: Mon Jan 12 13:46:40 1970 +0000
9 date: Mon Jan 12 13:46:40 1970 +0000
10 summary: test
10 summary: test
11
11
12 rolling back last transaction
12 rolling back last transaction
13 checking changesets
13 checking changesets
14 checking manifests
14 checking manifests
15 crosschecking files in changesets and manifests
15 crosschecking files in changesets and manifests
16 checking files
16 checking files
17 0 files, 0 changesets, 0 total revisions
17 0 files, 0 changesets, 0 total revisions
18 A a
18 A a
19 % Test issue 902
19 % Test issue 902
20 marked working directory as branch test
20 marked working directory as branch test
21 rolling back last transaction
21 rolling back last transaction
22 default
22 default
23 % Test issue 1635 (commit message saved)
23 % Test issue 1635 (commit message saved)
24 .hg/last-message.txt:
24 .hg/last-message.txt:
25 test2
25 test2
26 % Test rollback of hg before issue 902 was fixed
26 % Test rollback of hg before issue 902 was fixed
27 marked working directory as branch test
27 marked working directory as branch test
28 rolling back last transaction
28 rolling back last transaction
29 Named branch could not be reset, current branch still is: test
29 Named branch could not be reset, current branch still is: test
30 test
30 test
31 % rollback by pretxncommit saves commit message (issue 1635)
31 % rollback by pretxncommit saves commit message (issue 1635)
32 transaction abort!
32 transaction abort!
33 rollback completed
33 rollback completed
34 abort: pretxncommit hook exited with status 1
34 abort: pretxncommit hook exited with status 1
35 .hg/last-message.txt:
35 .hg/last-message.txt:
36 precious commit message
36 precious commit message
37 % same thing, but run $EDITOR
38 transaction abort!
39 rollback completed
40 note: commit message saved in .hg/last-message.txt
41 abort: pretxncommit hook exited with status 1
42 .hg/last-message.txt:
43 another precious commit message