##// END OF EJS Templates
Show added files as "added" in editor commit message (issue 1330)
Patrick Mezard -
r7072:4e0d54fb default
parent child Browse files
Show More
@@ -1,2077 +1,2085 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
class localrepository(repo.repository):
    # Wire-protocol capabilities this repository advertises to peers.
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # On-disk requirements (from .hg/requires) this code understands.
    supported = ('revlogv1', 'store')
20
20
def __init__(self, parentui, path=None, create=0):
    """Open the repository rooted at *path*, or initialize a new one.

    parentui: ui object that configuration is inherited from.
    create:   when true, create the .hg layout; raises RepoError if the
              repository already exists.
    Raises repo.RepoError when path is not a repository (and create is
    false) or an on-disk requirement is not supported.
    """
    repo.repository.__init__(self)
    self.root = os.path.realpath(path)
    self.path = os.path.join(self.root, ".hg")
    self.origroot = path
    # opener/wopener create files relative to .hg / the working dir
    self.opener = util.opener(self.path)
    self.wopener = util.opener(self.root)

    if not os.path.isdir(self.path):
        if create:
            if not os.path.exists(path):
                os.mkdir(path)
            os.mkdir(self.path)
            requirements = ["revlogv1"]
            if parentui.configbool('format', 'usestore', True):
                os.mkdir(os.path.join(self.path, "store"))
                requirements.append("store")
            # create an invalid changelog so pre-"requires" versions of
            # Mercurial refuse to touch this repository
            self.opener("00changelog.i", "a").write(
                '\0\0\0\2' # represents revlogv2
                ' dummy changelog to prevent using the old repo layout'
            )
            reqfile = self.opener("requires", "w")
            for r in requirements:
                reqfile.write("%s\n" % r)
            reqfile.close()
        else:
            raise repo.RepoError(_("repository %s not found") % path)
    elif create:
        raise repo.RepoError(_("repository %s already exists") % path)
    else:
        # find requirements of the existing repository
        requirements = []
        try:
            requirements = self.opener("requires").read().splitlines()
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)
        except IOError, inst:
            # a missing requires file means an old-style repo: accept it
            if inst.errno != errno.ENOENT:
                raise

    # the store abstracts the flat vs. "store" layouts selected above
    self.store = store.store(requirements, self.path, util.opener)
    self.spath = self.store.path
    self.sopener = self.store.opener
    self.sjoin = self.store.join
    self.opener.createmode = self.store.createmode

    self.ui = ui.ui(parentui=parentui)
    try:
        self.ui.readconfig(self.join("hgrc"), self.root)
        extensions.loadall(self.ui)
    except IOError:
        # no per-repo hgrc is fine
        pass

    # lazily-populated caches; None/empty means "not computed yet"
    self.tagscache = None
    self._tagstypecache = None
    self.branchcache = None
    self._ubranchcache = None # UTF-8 version of branchcache
    self._branchcachetip = None
    self.nodetagscache = None
    self.filterpats = {}
    self._datafilters = {}
    # weakrefs to the active transaction/locks (see transaction())
    self._transref = self._lockref = self._wlockref = None
85
85
def __getattr__(self, name):
    """Lazily construct the expensive changelog/manifest/dirstate.

    The computed object is assigned onto the instance, so each attribute
    is built at most once and later accesses bypass __getattr__.
    """
    if name == 'changelog':
        self.changelog = changelog.changelog(self.sopener)
        # subsequent revlogs inherit the changelog's format version
        self.sopener.defversion = self.changelog.version
        return self.changelog
    if name == 'manifest':
        # touch the changelog first so defversion is set (see above)
        self.changelog
        self.manifest = manifest.manifest(self.sopener)
        return self.manifest
    if name == 'dirstate':
        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        return self.dirstate
    else:
        raise AttributeError(name)
100
100
def __getitem__(self, changeid):
    """repo[changeid] -> changectx for that revision.

    repo[None] returns a context for the working directory.
    Fix: compare against None with ``is`` instead of ``==`` -- equality
    may invoke an arbitrary __eq__ on changeid and is slower.
    """
    if changeid is None:
        return context.workingctx(self)
    return context.changectx(self, changeid)
105
105
def __nonzero__(self):
    """A repository object is always truthy (Python 2 bool protocol)."""
    return True
108
108
def __len__(self):
    """Number of revisions in the repository (changelog length)."""
    return len(self.changelog)
111
111
def __iter__(self):
    """Iterate over all revision numbers, 0 .. tip."""
    for i in xrange(len(self)):
        yield i
115
115
def url(self):
    """Return the URL of this repository (always a local file: URL)."""
    return 'file:' + self.root
118
118
def hook(self, name, throw=False, **args):
    """Run the configured hooks for *name*; with throw=True a failing
    hook raises instead of merely returning a failure status."""
    return hook.hook(self.ui, self, name, throw, **args)
121
121
# characters that may never appear in a tag name
tag_disallowed = ':\r\n'
123
123
def _tag(self, names, node, message, local, user, date, parent=None,
         extra={}):
    """Record tag(s) *names* for *node*; shared worker behind tag().

    names may be a single string or a sequence of tag names.
    local=True writes .hg/localtags; otherwise .hgtags is rewritten and
    committed.  parent, when given, is the commit parent to use instead
    of the dirstate parent.
    NOTE(review): mutable default ``extra={}`` is shared across calls --
    harmless as long as nothing mutates it; confirm before changing.
    Returns the new changeset node (or None for local tags).
    """
    use_dirstate = parent is None

    if isinstance(names, str):
        allchars = names
        names = (names,)
    else:
        allchars = ''.join(names)
    for c in self.tag_disallowed:
        if c in allchars:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)

    def writetags(fp, names, munge, prevtags):
        # append entries at EOF; munge converts names to the stored charset
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            m = munge and munge(name) or name
            if self._tagstypecache and name in self._tagstypecache:
                # record the old node first so readers see a supersede chain
                old = self.tagscache.get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.opener('localtags', 'r+')
        except IOError, err:
            fp = self.opener('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        return

    if use_dirstate:
        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, err:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()
    else:
        # committing against an explicit parent: start from that
        # revision's .hgtags content
        try:
            prevtags = self.filectx('.hgtags', parent).data()
        except revlog.LookupError:
            pass
        fp = self.wfile('.hgtags', 'wb')
        if prevtags:
            fp.write(prevtags)

    # committed tags are stored in UTF-8
    writetags(fp, names, util.fromlocal, prevtags)

    if use_dirstate and '.hgtags' not in self.dirstate:
        self.add(['.hgtags'])

    tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                          extra=extra)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
197
197
def tag(self, names, node, message, local, user, date):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    # refuse if .hgtags shows up in any of the first five status lists
    # (modified, added, removed, deleted, unknown) -- tagging would
    # silently commit those pending edits
    for x in self.status()[:5]:
        if '.hgtags' in x:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self._tag(names, node, message, local, user, date)
225
225
def tags(self):
    '''return a mapping of tag to node'''
    if self.tagscache:
        return self.tagscache

    globaltags = {}
    tagtypes = {}

    def readtags(lines, fn, tagtype):
        # parse one tags file and merge its entries into globaltags;
        # each value is (node, [older superseded nodes])
        filetags = {}
        count = 0

        def warn(msg):
            self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

        for l in lines:
            count += 1
            if not l:
                continue
            s = l.split(" ", 1)
            if len(s) != 2:
                warn(_("cannot parse entry"))
                continue
            node, key = s
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                bin_n = bin(node)
            except TypeError:
                warn(_("node '%s' is not well formed") % node)
                continue
            if bin_n not in self.changelog.nodemap:
                warn(_("tag '%s' refers to unknown node") % key)
                continue

            # later entries for the same tag supersede earlier ones;
            # keep the history in h
            h = []
            if key in filetags:
                n, h = filetags[key]
                h.append(n)
            filetags[key] = (bin_n, h)

        for k, nh in filetags.items():
            if k not in globaltags:
                globaltags[k] = nh
                tagtypes[k] = tagtype
                continue

            # we prefer the global tag if:
            #  it supercedes us OR
            #  mutual supercedes and it has a higher rank
            # otherwise we win because we're tip-most
            an, ah = nh
            bn, bh = globaltags[k]
            if (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah))):
                an = bn
            ah.extend([n for n in bh if n not in ah])
            globaltags[k] = an, ah
            tagtypes[k] = tagtype

    # read the tags file from each head, ending with the tip
    f = None
    for rev, node, fnode in self._hgtagsnodes():
        # reuse the previous filectx when possible to share the filelog
        f = (f and f.filectx(fnode) or
             self.filectx('.hgtags', fileid=fnode))
        readtags(f.data().splitlines(), f, "global")

    try:
        data = util.fromlocal(self.opener("localtags").read())
        # localtags are stored in the local character set
        # while the internal tag table is stored in UTF-8
        readtags(data.splitlines(), "localtags", "local")
    except IOError:
        # no localtags file is fine
        pass

    self.tagscache = {}
    self._tagstypecache = {}
    for k,nh in globaltags.items():
        n = nh[0]
        # a nullid entry means the tag was deleted
        if n != nullid:
            self.tagscache[k] = n
        self._tagstypecache[k] = tagtypes[k]
    # 'tip' is implicit and always present
    self.tagscache['tip'] = self.changelog.tip()
    return self.tagscache
309
309
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''
    # populate the tag caches as a side effect before consulting them
    self.tags()
    return self._tagstypecache.get(tagname)
322
322
def _hgtagsnodes(self):
    """Return [(rev, node, .hgtags filenode)] for heads, tip last.

    Heads whose .hgtags filenode duplicates a later head's are dropped,
    so each distinct tags file is read only once.
    """
    heads = self.heads()
    heads.reverse()  # heads() is tip-first; we want tip last
    last = {}
    ret = []
    for node in heads:
        c = self[node]
        rev = c.rev()
        try:
            fnode = c.filenode('.hgtags')
        except revlog.LookupError:
            # this head has no .hgtags file
            continue
        ret.append((rev, node, fnode))
        if fnode in last:
            # same tags file already queued: blank the earlier entry
            ret[last[fnode]] = None
        last[fnode] = len(ret) - 1
    return [item for item in ret if item]
340
340
def tagslist(self):
    '''return a list of tags ordered by revision'''
    l = []
    for t, n in self.tags().items():
        try:
            r = self.changelog.rev(n)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # narrowing to the changelog lookup error looks safe -- confirm
            r = -2 # sort to the beginning of the list if unknown
        l.append((r, t, n))
    return [(t, n) for r, t, n in util.sort(l)]
351
351
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # invert the tag->node map once and cache the result
        bynode = {}
        for tag, n in self.tags().items():
            bynode.setdefault(n, []).append(tag)
        self.nodetagscache = bynode
    return self.nodetagscache.get(node, [])
359
359
def _branchtags(self, partial, lrev):
    """Bring the branch cache *partial* (valid through rev lrev) up to
    tip, write it to disk, and return it."""
    tiprev = len(self) - 1
    if lrev != tiprev:
        self._updatebranchcache(partial, lrev+1, tiprev+1)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)

    return partial
367
367
def branchtags(self):
    """Return a {branchname: tipmost-node} map (local charset keys).

    The map is cached in-memory keyed on the changelog tip and backed by
    the on-disk branch.cache file.
    """
    tip = self.changelog.tip()
    if self.branchcache is not None and self._branchcachetip == tip:
        return self.branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if self.branchcache is None:
        self.branchcache = {} # avoid recursion in changectx
    else:
        self.branchcache.clear() # keep using the same dict
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # cache unusable (first call, or the old tip was stripped):
        # reread from disk
        partial, last, lrev = self._readbranchcache()
    else:
        # incremental update from the previously-cached tip
        lrev = self.changelog.rev(oldtip)
        partial = self._ubranchcache

    self._branchtags(partial, lrev)

    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for k, v in partial.items():
        self.branchcache[util.tolocal(k)] = v
    self._ubranchcache = partial
    return self.branchcache
393
393
def _readbranchcache(self):
    """Read .hg/branch.cache; return (branchmap, tipnode, tiprev).

    The first line is "<tiphex> <tiprev>"; each following line is
    "<nodehex> <branchname>".  Any problem yields an empty cache.
    """
    partial = {}
    try:
        f = self.opener("branch.cache")
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        # missing/unreadable cache: rebuild from scratch
        return {}, nullid, nullrev

    try:
        last, lrev = lines.pop(0).split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if lrev >= len(self) or self[lrev].node() != last:
            # invalidate the cache
            raise ValueError('invalidating branch cache (tip differs)')
        for l in lines:
            if not l: continue
            node, label = l.split(" ", 1)
            partial[label.strip()] = bin(node)
    except (KeyboardInterrupt, util.SignalInterrupt):
        raise
    except Exception, inst:
        # any parse error falls back to an empty cache; only report
        # in debug mode since the cache is purely an optimization
        if self.ui.debugflag:
            self.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev
420
420
def _writebranchcache(self, branches, tip, tiprev):
    """Persist the branch map to .hg/branch.cache (best-effort).

    Uses an atomictemp file so readers never see a partial cache;
    write failures are ignored -- the cache is only an optimization.
    """
    try:
        f = self.opener("branch.cache", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, node in branches.iteritems():
            f.write("%s %s\n" % (hex(node), label))
        f.rename()
    except (IOError, OSError):
        pass
430
430
def _updatebranchcache(self, partial, start, end):
    """Fold revisions start..end-1 into the branch map *partial*.

    Later revisions overwrite earlier ones, so each branch maps to its
    tipmost node in the scanned range.
    """
    for r in xrange(start, end):
        c = self[r]
        b = c.branch()
        partial[b] = c.node()
436
436
def lookup(self, key):
    """Resolve *key* to a changelog node.

    Tries, in order: '.' (working dir parent), 'null', exact changelog
    match, tag name, branch name, unambiguous node-hex prefix.
    Raises repo.RepoError if nothing matches.
    """
    if key == '.':
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    n = self.changelog._partialmatch(key)
    if n:
        return n
    try:
        # a 20-byte key is probably a binary node: hex it for the message
        if len(key) == 20:
            key = hex(key)
    except:
        # NOTE(review): bare except guards against keys without len();
        # it only affects error formatting
        pass
    raise repo.RepoError(_("unknown revision '%s'") % key)
458
458
def local(self):
    """This is a local (direct filesystem) repository."""
    return True
461
461
def join(self, f):
    """Return the path of f relative to the .hg directory."""
    return os.path.join(self.path, f)
464
464
def wjoin(self, f):
    """Return the path of f relative to the working directory root."""
    return os.path.join(self.root, f)
467
467
def rjoin(self, f):
    """Like wjoin, but converts f from repo ('/') to OS separators first."""
    return os.path.join(self.root, util.pconvert(f))
470
470
def file(self, f):
    """Return the filelog (revision history) for tracked file *f*.

    A leading '/' is stripped so repo-absolute paths are accepted.
    Fix: use startswith so an empty filename no longer raises
    IndexError from f[0].
    """
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.sopener, f)
475
475
def changectx(self, changeid):
    """Return a changectx for changeid (alias for repo[changeid])."""
    return self[changeid]
478
478
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    return self[changeid].parents()
482
482
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
487
487
def getcwd(self):
    """Return the current working directory relative to the repo root."""
    return self.dirstate.getcwd()
490
490
def pathto(self, f, cwd=None):
    """Return repo-relative path f expressed relative to cwd."""
    return self.dirstate.pathto(f, cwd)
493
493
def wfile(self, f, mode='r'):
    """Open working-directory file f with the given mode."""
    return self.wopener(f, mode)
496
496
def _link(self, f):
    """True if working-directory file f is a symbolic link."""
    return os.path.islink(self.wjoin(f))
499
499
def _filter(self, filter, filename, data):
    """Run *data* through the configured encode/decode filters.

    filter is a config section name ('encode' or 'decode'); each entry
    maps a file pattern to a command or a registered data filter.  The
    compiled (matcher, fn, params) list is cached per section.  Only the
    first matching pattern is applied.
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            fn = None
            params = cmd
            # a command starting with a registered filter name selects
            # that in-process filter instead of a shell pipe
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                # fall back to piping through the external command
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l

    for mf, fn, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
528
528
def adddatafilter(self, name, filter):
    """Register an in-process data filter callable under *name*
    (consulted by _filter before falling back to shell commands)."""
    self._datafilters[name] = filter
531
531
def wread(self, filename):
    """Read *filename* from the working directory and return its data
    after applying the configured "encode" filters.

    For a symlink the link target string is used as the data.
    """
    if self._link(filename):
        raw = os.readlink(self.wjoin(filename))
    else:
        raw = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, raw)
538
538
def wwrite(self, filename, data, flags):
    """Write *data* to working-directory file *filename* after applying
    "decode" filters.

    flags: 'l' writes a symlink whose target is data; 'x' marks the
    file executable.  The old file is unlinked first so a regular file
    can be replaced by a symlink and vice versa.
    """
    data = self._filter("decode", filename, data)
    try:
        os.unlink(self.wjoin(filename))
    except OSError:
        # file did not exist; nothing to remove
        pass
    if 'l' in flags:
        self.wopener.symlink(data, filename)
    else:
        self.wopener(filename, 'w').write(data)
        if 'x' in flags:
            util.set_flags(self.wjoin(filename), False, True)
551
551
def wwritedata(self, filename, data):
    """Return *data* run through the "decode" filters, without writing."""
    return self._filter("decode", filename, data)
554
554
def transaction(self):
    """Start (or nest into) a repository-wide transaction.

    Returns a transaction object; only a weakref is kept on self so the
    transaction dies when the caller drops it.  Dirstate and branch are
    journaled so rollback() can restore them.
    Raises repo.RepoError if an abandoned journal exists.
    """
    # reuse a live transaction if one is already running
    if self._transref and self._transref():
        return self._transref().nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise repo.RepoError(_("journal already exists - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    # on successful close, the journal files become the undo files
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self.store.createmode)
    self._transref = weakref.ref(tr)
    return tr
580
580
581 def recover(self):
581 def recover(self):
582 l = self.lock()
582 l = self.lock()
583 try:
583 try:
584 if os.path.exists(self.sjoin("journal")):
584 if os.path.exists(self.sjoin("journal")):
585 self.ui.status(_("rolling back interrupted transaction\n"))
585 self.ui.status(_("rolling back interrupted transaction\n"))
586 transaction.rollback(self.sopener, self.sjoin("journal"))
586 transaction.rollback(self.sopener, self.sjoin("journal"))
587 self.invalidate()
587 self.invalidate()
588 return True
588 return True
589 else:
589 else:
590 self.ui.warn(_("no interrupted transaction available\n"))
590 self.ui.warn(_("no interrupted transaction available\n"))
591 return False
591 return False
592 finally:
592 finally:
593 del l
593 del l
594
594
595 def rollback(self):
595 def rollback(self):
596 wlock = lock = None
596 wlock = lock = None
597 try:
597 try:
598 wlock = self.wlock()
598 wlock = self.wlock()
599 lock = self.lock()
599 lock = self.lock()
600 if os.path.exists(self.sjoin("undo")):
600 if os.path.exists(self.sjoin("undo")):
601 self.ui.status(_("rolling back last transaction\n"))
601 self.ui.status(_("rolling back last transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 try:
604 try:
605 branch = self.opener("undo.branch").read()
605 branch = self.opener("undo.branch").read()
606 self.dirstate.setbranch(branch)
606 self.dirstate.setbranch(branch)
607 except IOError:
607 except IOError:
608 self.ui.warn(_("Named branch could not be reset, "
608 self.ui.warn(_("Named branch could not be reset, "
609 "current branch still is: %s\n")
609 "current branch still is: %s\n")
610 % util.tolocal(self.dirstate.branch()))
610 % util.tolocal(self.dirstate.branch()))
611 self.invalidate()
611 self.invalidate()
612 self.dirstate.invalidate()
612 self.dirstate.invalidate()
613 else:
613 else:
614 self.ui.warn(_("no rollback information available\n"))
614 self.ui.warn(_("no rollback information available\n"))
615 finally:
615 finally:
616 del lock, wlock
616 del lock, wlock
617
617
618 def invalidate(self):
618 def invalidate(self):
619 for a in "changelog manifest".split():
619 for a in "changelog manifest".split():
620 if a in self.__dict__:
620 if a in self.__dict__:
621 delattr(self, a)
621 delattr(self, a)
622 self.tagscache = None
622 self.tagscache = None
623 self._tagstypecache = None
623 self._tagstypecache = None
624 self.nodetagscache = None
624 self.nodetagscache = None
625 self.branchcache = None
625 self.branchcache = None
626 self._ubranchcache = None
626 self._ubranchcache = None
627 self._branchcachetip = None
627 self._branchcachetip = None
628
628
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 try:
630 try:
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 except lock.LockHeld, inst:
632 except lock.LockHeld, inst:
633 if not wait:
633 if not wait:
634 raise
634 raise
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 (desc, inst.locker))
636 (desc, inst.locker))
637 # default to 600 seconds timeout
637 # default to 600 seconds timeout
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 releasefn, desc=desc)
639 releasefn, desc=desc)
640 if acquirefn:
640 if acquirefn:
641 acquirefn()
641 acquirefn()
642 return l
642 return l
643
643
644 def lock(self, wait=True):
644 def lock(self, wait=True):
645 if self._lockref and self._lockref():
645 if self._lockref and self._lockref():
646 return self._lockref()
646 return self._lockref()
647
647
648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 _('repository %s') % self.origroot)
649 _('repository %s') % self.origroot)
650 self._lockref = weakref.ref(l)
650 self._lockref = weakref.ref(l)
651 return l
651 return l
652
652
653 def wlock(self, wait=True):
653 def wlock(self, wait=True):
654 if self._wlockref and self._wlockref():
654 if self._wlockref and self._wlockref():
655 return self._wlockref()
655 return self._wlockref()
656
656
657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 self.dirstate.invalidate, _('working directory of %s') %
658 self.dirstate.invalidate, _('working directory of %s') %
659 self.origroot)
659 self.origroot)
660 self._wlockref = weakref.ref(l)
660 self._wlockref = weakref.ref(l)
661 return l
661 return l
662
662
663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 """
664 """
665 commit an individual file as part of a larger transaction
665 commit an individual file as part of a larger transaction
666 """
666 """
667
667
668 fn = fctx.path()
668 fn = fctx.path()
669 t = fctx.data()
669 t = fctx.data()
670 fl = self.file(fn)
670 fl = self.file(fn)
671 fp1 = manifest1.get(fn, nullid)
671 fp1 = manifest1.get(fn, nullid)
672 fp2 = manifest2.get(fn, nullid)
672 fp2 = manifest2.get(fn, nullid)
673
673
674 meta = {}
674 meta = {}
675 cp = fctx.renamed()
675 cp = fctx.renamed()
676 if cp and cp[0] != fn:
676 if cp and cp[0] != fn:
677 # Mark the new revision of this file as a copy of another
677 # Mark the new revision of this file as a copy of another
678 # file. This copy data will effectively act as a parent
678 # file. This copy data will effectively act as a parent
679 # of this new revision. If this is a merge, the first
679 # of this new revision. If this is a merge, the first
680 # parent will be the nullid (meaning "look up the copy data")
680 # parent will be the nullid (meaning "look up the copy data")
681 # and the second one will be the other parent. For example:
681 # and the second one will be the other parent. For example:
682 #
682 #
683 # 0 --- 1 --- 3 rev1 changes file foo
683 # 0 --- 1 --- 3 rev1 changes file foo
684 # \ / rev2 renames foo to bar and changes it
684 # \ / rev2 renames foo to bar and changes it
685 # \- 2 -/ rev3 should have bar with all changes and
685 # \- 2 -/ rev3 should have bar with all changes and
686 # should record that bar descends from
686 # should record that bar descends from
687 # bar in rev2 and foo in rev1
687 # bar in rev2 and foo in rev1
688 #
688 #
689 # this allows this merge to succeed:
689 # this allows this merge to succeed:
690 #
690 #
691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 # \ / merging rev3 and rev4 should use bar@rev2
692 # \ / merging rev3 and rev4 should use bar@rev2
693 # \- 2 --- 4 as the merge base
693 # \- 2 --- 4 as the merge base
694 #
694 #
695
695
696 cf = cp[0]
696 cf = cp[0]
697 cr = manifest1.get(cf)
697 cr = manifest1.get(cf)
698 nfp = fp2
698 nfp = fp2
699
699
700 if manifest2: # branch merge
700 if manifest2: # branch merge
701 if fp2 == nullid: # copied on remote side
701 if fp2 == nullid: # copied on remote side
702 if fp1 != nullid or cf in manifest2:
702 if fp1 != nullid or cf in manifest2:
703 cr = manifest2[cf]
703 cr = manifest2[cf]
704 nfp = fp1
704 nfp = fp1
705
705
706 # find source in nearest ancestor if we've lost track
706 # find source in nearest ancestor if we've lost track
707 if not cr:
707 if not cr:
708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 (fn, cf))
709 (fn, cf))
710 for a in self['.'].ancestors():
710 for a in self['.'].ancestors():
711 if cf in a:
711 if cf in a:
712 cr = a[cf].filenode()
712 cr = a[cf].filenode()
713 break
713 break
714
714
715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 meta["copy"] = cf
716 meta["copy"] = cf
717 meta["copyrev"] = hex(cr)
717 meta["copyrev"] = hex(cr)
718 fp1, fp2 = nullid, nfp
718 fp1, fp2 = nullid, nfp
719 elif fp2 != nullid:
719 elif fp2 != nullid:
720 # is one parent an ancestor of the other?
720 # is one parent an ancestor of the other?
721 fpa = fl.ancestor(fp1, fp2)
721 fpa = fl.ancestor(fp1, fp2)
722 if fpa == fp1:
722 if fpa == fp1:
723 fp1, fp2 = fp2, nullid
723 fp1, fp2 = fp2, nullid
724 elif fpa == fp2:
724 elif fpa == fp2:
725 fp2 = nullid
725 fp2 = nullid
726
726
727 # is the file unmodified from the parent? report existing entry
727 # is the file unmodified from the parent? report existing entry
728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 return fp1
729 return fp1
730
730
731 changelist.append(fn)
731 changelist.append(fn)
732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733
733
734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 if p1 is None:
735 if p1 is None:
736 p1, p2 = self.dirstate.parents()
736 p1, p2 = self.dirstate.parents()
737 return self.commit(files=files, text=text, user=user, date=date,
737 return self.commit(files=files, text=text, user=user, date=date,
738 p1=p1, p2=p2, extra=extra, empty_ok=True)
738 p1=p1, p2=p2, extra=extra, empty_ok=True)
739
739
740 def commit(self, files=None, text="", user=None, date=None,
740 def commit(self, files=None, text="", user=None, date=None,
741 match=None, force=False, force_editor=False,
741 match=None, force=False, force_editor=False,
742 p1=None, p2=None, extra={}, empty_ok=False):
742 p1=None, p2=None, extra={}, empty_ok=False):
743 wlock = lock = None
743 wlock = lock = None
744 if files:
744 if files:
745 files = util.unique(files)
745 files = util.unique(files)
746 try:
746 try:
747 wlock = self.wlock()
747 wlock = self.wlock()
748 lock = self.lock()
748 lock = self.lock()
749 use_dirstate = (p1 is None) # not rawcommit
749 use_dirstate = (p1 is None) # not rawcommit
750
750
751 if use_dirstate:
751 if use_dirstate:
752 p1, p2 = self.dirstate.parents()
752 p1, p2 = self.dirstate.parents()
753 update_dirstate = True
753 update_dirstate = True
754
754
755 if (not force and p2 != nullid and
755 if (not force and p2 != nullid and
756 (match and (match.files() or match.anypats()))):
756 (match and (match.files() or match.anypats()))):
757 raise util.Abort(_('cannot partially commit a merge '
757 raise util.Abort(_('cannot partially commit a merge '
758 '(do not specify files or patterns)'))
758 '(do not specify files or patterns)'))
759
759
760 if files:
760 if files:
761 modified, removed = [], []
761 modified, removed = [], []
762 for f in files:
762 for f in files:
763 s = self.dirstate[f]
763 s = self.dirstate[f]
764 if s in 'nma':
764 if s in 'nma':
765 modified.append(f)
765 modified.append(f)
766 elif s == 'r':
766 elif s == 'r':
767 removed.append(f)
767 removed.append(f)
768 else:
768 else:
769 self.ui.warn(_("%s not tracked!\n") % f)
769 self.ui.warn(_("%s not tracked!\n") % f)
770 changes = [modified, [], removed, [], []]
770 changes = [modified, [], removed, [], []]
771 else:
771 else:
772 changes = self.status(match=match)
772 changes = self.status(match=match)
773 else:
773 else:
774 p1, p2 = p1, p2 or nullid
774 p1, p2 = p1, p2 or nullid
775 update_dirstate = (self.dirstate.parents()[0] == p1)
775 update_dirstate = (self.dirstate.parents()[0] == p1)
776 changes = [files, [], [], [], []]
776 changes = [files, [], [], [], []]
777
777
778 ms = merge_.mergestate(self)
778 ms = merge_.mergestate(self)
779 for f in changes[0]:
779 for f in changes[0]:
780 if f in ms and ms[f] == 'u':
780 if f in ms and ms[f] == 'u':
781 raise util.Abort(_("unresolved merge conflicts "
781 raise util.Abort(_("unresolved merge conflicts "
782 "(see hg resolve)"))
782 "(see hg resolve)"))
783 wctx = context.workingctx(self, (p1, p2), text, user, date,
783 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 extra, changes)
784 extra, changes)
785 return self._commitctx(wctx, force, force_editor, empty_ok,
785 return self._commitctx(wctx, force, force_editor, empty_ok,
786 use_dirstate, update_dirstate)
786 use_dirstate, update_dirstate)
787 finally:
787 finally:
788 del lock, wlock
788 del lock, wlock
789
789
790 def commitctx(self, ctx):
790 def commitctx(self, ctx):
791 wlock = lock = None
791 wlock = lock = None
792 try:
792 try:
793 wlock = self.wlock()
793 wlock = self.wlock()
794 lock = self.lock()
794 lock = self.lock()
795 return self._commitctx(ctx, force=True, force_editor=False,
795 return self._commitctx(ctx, force=True, force_editor=False,
796 empty_ok=True, use_dirstate=False,
796 empty_ok=True, use_dirstate=False,
797 update_dirstate=False)
797 update_dirstate=False)
798 finally:
798 finally:
799 del lock, wlock
799 del lock, wlock
800
800
801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 use_dirstate=True, update_dirstate=True):
802 use_dirstate=True, update_dirstate=True):
803 tr = None
803 tr = None
804 valid = 0 # don't save the dirstate if this isn't set
804 valid = 0 # don't save the dirstate if this isn't set
805 try:
805 try:
806 commit = util.sort(wctx.modified() + wctx.added())
806 commit = util.sort(wctx.modified() + wctx.added())
807 remove = wctx.removed()
807 remove = wctx.removed()
808 extra = wctx.extra().copy()
808 extra = wctx.extra().copy()
809 branchname = extra['branch']
809 branchname = extra['branch']
810 user = wctx.user()
810 user = wctx.user()
811 text = wctx.description()
811 text = wctx.description()
812
812
813 p1, p2 = [p.node() for p in wctx.parents()]
813 p1, p2 = [p.node() for p in wctx.parents()]
814 c1 = self.changelog.read(p1)
814 c1 = self.changelog.read(p1)
815 c2 = self.changelog.read(p2)
815 c2 = self.changelog.read(p2)
816 m1 = self.manifest.read(c1[0]).copy()
816 m1 = self.manifest.read(c1[0]).copy()
817 m2 = self.manifest.read(c2[0])
817 m2 = self.manifest.read(c2[0])
818
818
819 if use_dirstate:
819 if use_dirstate:
820 oldname = c1[5].get("branch") # stored in UTF-8
820 oldname = c1[5].get("branch") # stored in UTF-8
821 if (not commit and not remove and not force and p2 == nullid
821 if (not commit and not remove and not force and p2 == nullid
822 and branchname == oldname):
822 and branchname == oldname):
823 self.ui.status(_("nothing changed\n"))
823 self.ui.status(_("nothing changed\n"))
824 return None
824 return None
825
825
826 xp1 = hex(p1)
826 xp1 = hex(p1)
827 if p2 == nullid: xp2 = ''
827 if p2 == nullid: xp2 = ''
828 else: xp2 = hex(p2)
828 else: xp2 = hex(p2)
829
829
830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831
831
832 tr = self.transaction()
832 tr = self.transaction()
833 trp = weakref.proxy(tr)
833 trp = weakref.proxy(tr)
834
834
835 # check in files
835 # check in files
836 new = {}
836 new = {}
837 changed = []
837 changed = []
838 linkrev = len(self)
838 linkrev = len(self)
839 for f in commit:
839 for f in commit:
840 self.ui.note(f + "\n")
840 self.ui.note(f + "\n")
841 try:
841 try:
842 fctx = wctx.filectx(f)
842 fctx = wctx.filectx(f)
843 newflags = fctx.flags()
843 newflags = fctx.flags()
844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 if ((not changed or changed[-1] != f) and
845 if ((not changed or changed[-1] != f) and
846 m2.get(f) != new[f]):
846 m2.get(f) != new[f]):
847 # mention the file in the changelog if some
847 # mention the file in the changelog if some
848 # flag changed, even if there was no content
848 # flag changed, even if there was no content
849 # change.
849 # change.
850 if m1.flags(f) != newflags:
850 if m1.flags(f) != newflags:
851 changed.append(f)
851 changed.append(f)
852 m1.set(f, newflags)
852 m1.set(f, newflags)
853 if use_dirstate:
853 if use_dirstate:
854 self.dirstate.normal(f)
854 self.dirstate.normal(f)
855
855
856 except (OSError, IOError):
856 except (OSError, IOError):
857 if use_dirstate:
857 if use_dirstate:
858 self.ui.warn(_("trouble committing %s!\n") % f)
858 self.ui.warn(_("trouble committing %s!\n") % f)
859 raise
859 raise
860 else:
860 else:
861 remove.append(f)
861 remove.append(f)
862
862
863 updated, added = [], []
864 for f in util.sort(changed):
865 if f in m1 or f in m2:
866 updated.append(f)
867 else:
868 added.append(f)
869
863 # update manifest
870 # update manifest
864 m1.update(new)
871 m1.update(new)
865 removed = []
872 removed = []
866
873
867 for f in util.sort(remove):
874 for f in util.sort(remove):
868 if f in m1:
875 if f in m1:
869 del m1[f]
876 del m1[f]
870 removed.append(f)
877 removed.append(f)
871 elif f in m2:
878 elif f in m2:
872 removed.append(f)
879 removed.append(f)
873 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 (new, removed))
881 (new, removed))
875
882
876 # add changeset
883 # add changeset
877 if (not empty_ok and not text) or force_editor:
884 if (not empty_ok and not text) or force_editor:
878 edittext = []
885 edittext = []
879 if text:
886 if text:
880 edittext.append(text)
887 edittext.append(text)
881 edittext.append("")
888 edittext.append("")
882 edittext.append("") # Empty line between message and comments.
889 edittext.append("") # Empty line between message and comments.
883 edittext.append(_("HG: Enter commit message."
890 edittext.append(_("HG: Enter commit message."
884 " Lines beginning with 'HG:' are removed."))
891 " Lines beginning with 'HG:' are removed."))
885 edittext.append("HG: --")
892 edittext.append("HG: --")
886 edittext.append("HG: user: %s" % user)
893 edittext.append("HG: user: %s" % user)
887 if p2 != nullid:
894 if p2 != nullid:
888 edittext.append("HG: branch merge")
895 edittext.append("HG: branch merge")
889 if branchname:
896 if branchname:
890 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
891 edittext.extend(["HG: changed %s" % f for f in changed])
898 edittext.extend(["HG: added %s" % f for f in added])
899 edittext.extend(["HG: changed %s" % f for f in updated])
892 edittext.extend(["HG: removed %s" % f for f in removed])
900 edittext.extend(["HG: removed %s" % f for f in removed])
893 if not changed and not remove:
901 if not added and not updated and not removed:
894 edittext.append("HG: no files changed")
902 edittext.append("HG: no files changed")
895 edittext.append("")
903 edittext.append("")
896 # run editor in the repository root
904 # run editor in the repository root
897 olddir = os.getcwd()
905 olddir = os.getcwd()
898 os.chdir(self.root)
906 os.chdir(self.root)
899 text = self.ui.edit("\n".join(edittext), user)
907 text = self.ui.edit("\n".join(edittext), user)
900 os.chdir(olddir)
908 os.chdir(olddir)
901
909
902 lines = [line.rstrip() for line in text.rstrip().splitlines()]
910 lines = [line.rstrip() for line in text.rstrip().splitlines()]
903 while lines and not lines[0]:
911 while lines and not lines[0]:
904 del lines[0]
912 del lines[0]
905 if not lines and use_dirstate:
913 if not lines and use_dirstate:
906 raise util.Abort(_("empty commit message"))
914 raise util.Abort(_("empty commit message"))
907 text = '\n'.join(lines)
915 text = '\n'.join(lines)
908
916
909 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
917 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
910 user, wctx.date(), extra)
918 user, wctx.date(), extra)
911 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
919 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
912 parent2=xp2)
920 parent2=xp2)
913 tr.close()
921 tr.close()
914
922
915 if self.branchcache:
923 if self.branchcache:
916 self.branchtags()
924 self.branchtags()
917
925
918 if use_dirstate or update_dirstate:
926 if use_dirstate or update_dirstate:
919 self.dirstate.setparents(n)
927 self.dirstate.setparents(n)
920 if use_dirstate:
928 if use_dirstate:
921 for f in removed:
929 for f in removed:
922 self.dirstate.forget(f)
930 self.dirstate.forget(f)
923 valid = 1 # our dirstate updates are complete
931 valid = 1 # our dirstate updates are complete
924
932
925 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
933 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
926 return n
934 return n
927 finally:
935 finally:
928 if not valid: # don't save our updated dirstate
936 if not valid: # don't save our updated dirstate
929 self.dirstate.invalidate()
937 self.dirstate.invalidate()
930 del tr
938 del tr
931
939
932 def walk(self, match, node=None):
940 def walk(self, match, node=None):
933 '''
941 '''
934 walk recursively through the directory tree or a given
942 walk recursively through the directory tree or a given
935 changeset, finding all files matched by the match
943 changeset, finding all files matched by the match
936 function
944 function
937 '''
945 '''
938 return self[node].walk(match)
946 return self[node].walk(match)
939
947
940 def status(self, node1='.', node2=None, match=None,
948 def status(self, node1='.', node2=None, match=None,
941 ignored=False, clean=False, unknown=False):
949 ignored=False, clean=False, unknown=False):
942 """return status of files between two nodes or node and working directory
950 """return status of files between two nodes or node and working directory
943
951
944 If node1 is None, use the first dirstate parent instead.
952 If node1 is None, use the first dirstate parent instead.
945 If node2 is None, compare node1 with working directory.
953 If node2 is None, compare node1 with working directory.
946 """
954 """
947
955
948 def mfmatches(ctx):
956 def mfmatches(ctx):
949 mf = ctx.manifest().copy()
957 mf = ctx.manifest().copy()
950 for fn in mf.keys():
958 for fn in mf.keys():
951 if not match(fn):
959 if not match(fn):
952 del mf[fn]
960 del mf[fn]
953 return mf
961 return mf
954
962
955 ctx1 = self[node1]
963 ctx1 = self[node1]
956 ctx2 = self[node2]
964 ctx2 = self[node2]
957 working = ctx2 == self[None]
965 working = ctx2 == self[None]
958 parentworking = working and ctx1 == self['.']
966 parentworking = working and ctx1 == self['.']
959 match = match or match_.always(self.root, self.getcwd())
967 match = match or match_.always(self.root, self.getcwd())
960 listignored, listclean, listunknown = ignored, clean, unknown
968 listignored, listclean, listunknown = ignored, clean, unknown
961
969
962 if not parentworking:
970 if not parentworking:
963 def bad(f, msg):
971 def bad(f, msg):
964 if f not in ctx1:
972 if f not in ctx1:
965 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
973 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
966 return False
974 return False
967 match.bad = bad
975 match.bad = bad
968
976
969 if working: # we need to scan the working dir
977 if working: # we need to scan the working dir
970 s = self.dirstate.status(match, listignored, listclean, listunknown)
978 s = self.dirstate.status(match, listignored, listclean, listunknown)
971 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
979 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
972
980
973 # check for any possibly clean files
981 # check for any possibly clean files
974 if parentworking and cmp:
982 if parentworking and cmp:
975 fixup = []
983 fixup = []
976 # do a full compare of any files that might have changed
984 # do a full compare of any files that might have changed
977 for f in cmp:
985 for f in cmp:
978 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
986 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
979 or ctx1[f].cmp(ctx2[f].data())):
987 or ctx1[f].cmp(ctx2[f].data())):
980 modified.append(f)
988 modified.append(f)
981 else:
989 else:
982 fixup.append(f)
990 fixup.append(f)
983
991
984 if listclean:
992 if listclean:
985 clean += fixup
993 clean += fixup
986
994
987 # update dirstate for files that are actually clean
995 # update dirstate for files that are actually clean
988 if fixup:
996 if fixup:
989 wlock = None
997 wlock = None
990 try:
998 try:
991 try:
999 try:
992 wlock = self.wlock(False)
1000 wlock = self.wlock(False)
993 for f in fixup:
1001 for f in fixup:
994 self.dirstate.normal(f)
1002 self.dirstate.normal(f)
995 except lock.LockException:
1003 except lock.LockException:
996 pass
1004 pass
997 finally:
1005 finally:
998 del wlock
1006 del wlock
999
1007
1000 if not parentworking:
1008 if not parentworking:
1001 mf1 = mfmatches(ctx1)
1009 mf1 = mfmatches(ctx1)
1002 if working:
1010 if working:
1003 # we are comparing working dir against non-parent
1011 # we are comparing working dir against non-parent
1004 # generate a pseudo-manifest for the working dir
1012 # generate a pseudo-manifest for the working dir
1005 mf2 = mfmatches(self['.'])
1013 mf2 = mfmatches(self['.'])
1006 for f in cmp + modified + added:
1014 for f in cmp + modified + added:
1007 mf2[f] = None
1015 mf2[f] = None
1008 mf2.set(f, ctx2.flags(f))
1016 mf2.set(f, ctx2.flags(f))
1009 for f in removed:
1017 for f in removed:
1010 if f in mf2:
1018 if f in mf2:
1011 del mf2[f]
1019 del mf2[f]
1012 else:
1020 else:
1013 # we are comparing two revisions
1021 # we are comparing two revisions
1014 deleted, unknown, ignored = [], [], []
1022 deleted, unknown, ignored = [], [], []
1015 mf2 = mfmatches(ctx2)
1023 mf2 = mfmatches(ctx2)
1016
1024
1017 modified, added, clean = [], [], []
1025 modified, added, clean = [], [], []
1018 for fn in mf2:
1026 for fn in mf2:
1019 if fn in mf1:
1027 if fn in mf1:
1020 if (mf1.flags(fn) != mf2.flags(fn) or
1028 if (mf1.flags(fn) != mf2.flags(fn) or
1021 (mf1[fn] != mf2[fn] and
1029 (mf1[fn] != mf2[fn] and
1022 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1030 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1023 modified.append(fn)
1031 modified.append(fn)
1024 elif listclean:
1032 elif listclean:
1025 clean.append(fn)
1033 clean.append(fn)
1026 del mf1[fn]
1034 del mf1[fn]
1027 else:
1035 else:
1028 added.append(fn)
1036 added.append(fn)
1029 removed = mf1.keys()
1037 removed = mf1.keys()
1030
1038
1031 r = modified, added, removed, deleted, unknown, ignored, clean
1039 r = modified, added, removed, deleted, unknown, ignored, clean
1032 [l.sort() for l in r]
1040 [l.sort() for l in r]
1033 return r
1041 return r
1034
1042
1035 def add(self, list):
1043 def add(self, list):
1036 wlock = self.wlock()
1044 wlock = self.wlock()
1037 try:
1045 try:
1038 rejected = []
1046 rejected = []
1039 for f in list:
1047 for f in list:
1040 p = self.wjoin(f)
1048 p = self.wjoin(f)
1041 try:
1049 try:
1042 st = os.lstat(p)
1050 st = os.lstat(p)
1043 except:
1051 except:
1044 self.ui.warn(_("%s does not exist!\n") % f)
1052 self.ui.warn(_("%s does not exist!\n") % f)
1045 rejected.append(f)
1053 rejected.append(f)
1046 continue
1054 continue
1047 if st.st_size > 10000000:
1055 if st.st_size > 10000000:
1048 self.ui.warn(_("%s: files over 10MB may cause memory and"
1056 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 " performance problems\n"
1057 " performance problems\n"
1050 "(use 'hg revert %s' to unadd the file)\n")
1058 "(use 'hg revert %s' to unadd the file)\n")
1051 % (f, f))
1059 % (f, f))
1052 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1060 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 self.ui.warn(_("%s not added: only files and symlinks "
1061 self.ui.warn(_("%s not added: only files and symlinks "
1054 "supported currently\n") % f)
1062 "supported currently\n") % f)
1055 rejected.append(p)
1063 rejected.append(p)
1056 elif self.dirstate[f] in 'amn':
1064 elif self.dirstate[f] in 'amn':
1057 self.ui.warn(_("%s already tracked!\n") % f)
1065 self.ui.warn(_("%s already tracked!\n") % f)
1058 elif self.dirstate[f] == 'r':
1066 elif self.dirstate[f] == 'r':
1059 self.dirstate.normallookup(f)
1067 self.dirstate.normallookup(f)
1060 else:
1068 else:
1061 self.dirstate.add(f)
1069 self.dirstate.add(f)
1062 return rejected
1070 return rejected
1063 finally:
1071 finally:
1064 del wlock
1072 del wlock
1065
1073
1066 def forget(self, list):
1074 def forget(self, list):
1067 wlock = self.wlock()
1075 wlock = self.wlock()
1068 try:
1076 try:
1069 for f in list:
1077 for f in list:
1070 if self.dirstate[f] != 'a':
1078 if self.dirstate[f] != 'a':
1071 self.ui.warn(_("%s not added!\n") % f)
1079 self.ui.warn(_("%s not added!\n") % f)
1072 else:
1080 else:
1073 self.dirstate.forget(f)
1081 self.dirstate.forget(f)
1074 finally:
1082 finally:
1075 del wlock
1083 del wlock
1076
1084
1077 def remove(self, list, unlink=False):
1085 def remove(self, list, unlink=False):
1078 wlock = None
1086 wlock = None
1079 try:
1087 try:
1080 if unlink:
1088 if unlink:
1081 for f in list:
1089 for f in list:
1082 try:
1090 try:
1083 util.unlink(self.wjoin(f))
1091 util.unlink(self.wjoin(f))
1084 except OSError, inst:
1092 except OSError, inst:
1085 if inst.errno != errno.ENOENT:
1093 if inst.errno != errno.ENOENT:
1086 raise
1094 raise
1087 wlock = self.wlock()
1095 wlock = self.wlock()
1088 for f in list:
1096 for f in list:
1089 if unlink and os.path.exists(self.wjoin(f)):
1097 if unlink and os.path.exists(self.wjoin(f)):
1090 self.ui.warn(_("%s still exists!\n") % f)
1098 self.ui.warn(_("%s still exists!\n") % f)
1091 elif self.dirstate[f] == 'a':
1099 elif self.dirstate[f] == 'a':
1092 self.dirstate.forget(f)
1100 self.dirstate.forget(f)
1093 elif f not in self.dirstate:
1101 elif f not in self.dirstate:
1094 self.ui.warn(_("%s not tracked!\n") % f)
1102 self.ui.warn(_("%s not tracked!\n") % f)
1095 else:
1103 else:
1096 self.dirstate.remove(f)
1104 self.dirstate.remove(f)
1097 finally:
1105 finally:
1098 del wlock
1106 del wlock
1099
1107
1100 def undelete(self, list):
1108 def undelete(self, list):
1101 wlock = None
1109 wlock = None
1102 try:
1110 try:
1103 manifests = [self.manifest.read(self.changelog.read(p)[0])
1111 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 for p in self.dirstate.parents() if p != nullid]
1112 for p in self.dirstate.parents() if p != nullid]
1105 wlock = self.wlock()
1113 wlock = self.wlock()
1106 for f in list:
1114 for f in list:
1107 if self.dirstate[f] != 'r':
1115 if self.dirstate[f] != 'r':
1108 self.ui.warn(_("%s not removed!\n") % f)
1116 self.ui.warn(_("%s not removed!\n") % f)
1109 else:
1117 else:
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1118 m = f in manifests[0] and manifests[0] or manifests[1]
1111 t = self.file(f).read(m[f])
1119 t = self.file(f).read(m[f])
1112 self.wwrite(f, t, m.flags(f))
1120 self.wwrite(f, t, m.flags(f))
1113 self.dirstate.normal(f)
1121 self.dirstate.normal(f)
1114 finally:
1122 finally:
1115 del wlock
1123 del wlock
1116
1124
1117 def copy(self, source, dest):
1125 def copy(self, source, dest):
1118 wlock = None
1126 wlock = None
1119 try:
1127 try:
1120 p = self.wjoin(dest)
1128 p = self.wjoin(dest)
1121 if not (os.path.exists(p) or os.path.islink(p)):
1129 if not (os.path.exists(p) or os.path.islink(p)):
1122 self.ui.warn(_("%s does not exist!\n") % dest)
1130 self.ui.warn(_("%s does not exist!\n") % dest)
1123 elif not (os.path.isfile(p) or os.path.islink(p)):
1131 elif not (os.path.isfile(p) or os.path.islink(p)):
1124 self.ui.warn(_("copy failed: %s is not a file or a "
1132 self.ui.warn(_("copy failed: %s is not a file or a "
1125 "symbolic link\n") % dest)
1133 "symbolic link\n") % dest)
1126 else:
1134 else:
1127 wlock = self.wlock()
1135 wlock = self.wlock()
1128 if dest not in self.dirstate:
1136 if dest not in self.dirstate:
1129 self.dirstate.add(dest)
1137 self.dirstate.add(dest)
1130 self.dirstate.copy(source, dest)
1138 self.dirstate.copy(source, dest)
1131 finally:
1139 finally:
1132 del wlock
1140 del wlock
1133
1141
1134 def heads(self, start=None):
1142 def heads(self, start=None):
1135 heads = self.changelog.heads(start)
1143 heads = self.changelog.heads(start)
1136 # sort the output in rev descending order
1144 # sort the output in rev descending order
1137 heads = [(-self.changelog.rev(h), h) for h in heads]
1145 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 return [n for (r, n) in util.sort(heads)]
1146 return [n for (r, n) in util.sort(heads)]
1139
1147
1140 def branchheads(self, branch=None, start=None):
1148 def branchheads(self, branch=None, start=None):
1141 if branch is None:
1149 if branch is None:
1142 branch = self[None].branch()
1150 branch = self[None].branch()
1143 branches = self.branchtags()
1151 branches = self.branchtags()
1144 if branch not in branches:
1152 if branch not in branches:
1145 return []
1153 return []
1146 # The basic algorithm is this:
1154 # The basic algorithm is this:
1147 #
1155 #
1148 # Start from the branch tip since there are no later revisions that can
1156 # Start from the branch tip since there are no later revisions that can
1149 # possibly be in this branch, and the tip is a guaranteed head.
1157 # possibly be in this branch, and the tip is a guaranteed head.
1150 #
1158 #
1151 # Remember the tip's parents as the first ancestors, since these by
1159 # Remember the tip's parents as the first ancestors, since these by
1152 # definition are not heads.
1160 # definition are not heads.
1153 #
1161 #
1154 # Step backwards from the brach tip through all the revisions. We are
1162 # Step backwards from the brach tip through all the revisions. We are
1155 # guaranteed by the rules of Mercurial that we will now be visiting the
1163 # guaranteed by the rules of Mercurial that we will now be visiting the
1156 # nodes in reverse topological order (children before parents).
1164 # nodes in reverse topological order (children before parents).
1157 #
1165 #
1158 # If a revision is one of the ancestors of a head then we can toss it
1166 # If a revision is one of the ancestors of a head then we can toss it
1159 # out of the ancestors set (we've already found it and won't be
1167 # out of the ancestors set (we've already found it and won't be
1160 # visiting it again) and put its parents in the ancestors set.
1168 # visiting it again) and put its parents in the ancestors set.
1161 #
1169 #
1162 # Otherwise, if a revision is in the branch it's another head, since it
1170 # Otherwise, if a revision is in the branch it's another head, since it
1163 # wasn't in the ancestor list of an existing head. So add it to the
1171 # wasn't in the ancestor list of an existing head. So add it to the
1164 # head list, and add its parents to the ancestor list.
1172 # head list, and add its parents to the ancestor list.
1165 #
1173 #
1166 # If it is not in the branch ignore it.
1174 # If it is not in the branch ignore it.
1167 #
1175 #
1168 # Once we have a list of heads, use nodesbetween to filter out all the
1176 # Once we have a list of heads, use nodesbetween to filter out all the
1169 # heads that cannot be reached from startrev. There may be a more
1177 # heads that cannot be reached from startrev. There may be a more
1170 # efficient way to do this as part of the previous algorithm.
1178 # efficient way to do this as part of the previous algorithm.
1171
1179
1172 set = util.set
1180 set = util.set
1173 heads = [self.changelog.rev(branches[branch])]
1181 heads = [self.changelog.rev(branches[branch])]
1174 # Don't care if ancestors contains nullrev or not.
1182 # Don't care if ancestors contains nullrev or not.
1175 ancestors = set(self.changelog.parentrevs(heads[0]))
1183 ancestors = set(self.changelog.parentrevs(heads[0]))
1176 for rev in xrange(heads[0] - 1, nullrev, -1):
1184 for rev in xrange(heads[0] - 1, nullrev, -1):
1177 if rev in ancestors:
1185 if rev in ancestors:
1178 ancestors.update(self.changelog.parentrevs(rev))
1186 ancestors.update(self.changelog.parentrevs(rev))
1179 ancestors.remove(rev)
1187 ancestors.remove(rev)
1180 elif self[rev].branch() == branch:
1188 elif self[rev].branch() == branch:
1181 heads.append(rev)
1189 heads.append(rev)
1182 ancestors.update(self.changelog.parentrevs(rev))
1190 ancestors.update(self.changelog.parentrevs(rev))
1183 heads = [self.changelog.node(rev) for rev in heads]
1191 heads = [self.changelog.node(rev) for rev in heads]
1184 if start is not None:
1192 if start is not None:
1185 heads = self.changelog.nodesbetween([start], heads)[2]
1193 heads = self.changelog.nodesbetween([start], heads)[2]
1186 return heads
1194 return heads
1187
1195
1188 def branches(self, nodes):
1196 def branches(self, nodes):
1189 if not nodes:
1197 if not nodes:
1190 nodes = [self.changelog.tip()]
1198 nodes = [self.changelog.tip()]
1191 b = []
1199 b = []
1192 for n in nodes:
1200 for n in nodes:
1193 t = n
1201 t = n
1194 while 1:
1202 while 1:
1195 p = self.changelog.parents(n)
1203 p = self.changelog.parents(n)
1196 if p[1] != nullid or p[0] == nullid:
1204 if p[1] != nullid or p[0] == nullid:
1197 b.append((t, n, p[0], p[1]))
1205 b.append((t, n, p[0], p[1]))
1198 break
1206 break
1199 n = p[0]
1207 n = p[0]
1200 return b
1208 return b
1201
1209
1202 def between(self, pairs):
1210 def between(self, pairs):
1203 r = []
1211 r = []
1204
1212
1205 for top, bottom in pairs:
1213 for top, bottom in pairs:
1206 n, l, i = top, [], 0
1214 n, l, i = top, [], 0
1207 f = 1
1215 f = 1
1208
1216
1209 while n != bottom:
1217 while n != bottom:
1210 p = self.changelog.parents(n)[0]
1218 p = self.changelog.parents(n)[0]
1211 if i == f:
1219 if i == f:
1212 l.append(n)
1220 l.append(n)
1213 f = f * 2
1221 f = f * 2
1214 n = p
1222 n = p
1215 i += 1
1223 i += 1
1216
1224
1217 r.append(l)
1225 r.append(l)
1218
1226
1219 return r
1227 return r
1220
1228
1221 def findincoming(self, remote, base=None, heads=None, force=False):
1229 def findincoming(self, remote, base=None, heads=None, force=False):
1222 """Return list of roots of the subsets of missing nodes from remote
1230 """Return list of roots of the subsets of missing nodes from remote
1223
1231
1224 If base dict is specified, assume that these nodes and their parents
1232 If base dict is specified, assume that these nodes and their parents
1225 exist on the remote side and that no child of a node of base exists
1233 exist on the remote side and that no child of a node of base exists
1226 in both remote and self.
1234 in both remote and self.
1227 Furthermore base will be updated to include the nodes that exists
1235 Furthermore base will be updated to include the nodes that exists
1228 in self and remote but no children exists in self and remote.
1236 in self and remote but no children exists in self and remote.
1229 If a list of heads is specified, return only nodes which are heads
1237 If a list of heads is specified, return only nodes which are heads
1230 or ancestors of these heads.
1238 or ancestors of these heads.
1231
1239
1232 All the ancestors of base are in self and in remote.
1240 All the ancestors of base are in self and in remote.
1233 All the descendants of the list returned are missing in self.
1241 All the descendants of the list returned are missing in self.
1234 (and so we know that the rest of the nodes are missing in remote, see
1242 (and so we know that the rest of the nodes are missing in remote, see
1235 outgoing)
1243 outgoing)
1236 """
1244 """
1237 m = self.changelog.nodemap
1245 m = self.changelog.nodemap
1238 search = []
1246 search = []
1239 fetch = {}
1247 fetch = {}
1240 seen = {}
1248 seen = {}
1241 seenbranch = {}
1249 seenbranch = {}
1242 if base == None:
1250 if base == None:
1243 base = {}
1251 base = {}
1244
1252
1245 if not heads:
1253 if not heads:
1246 heads = remote.heads()
1254 heads = remote.heads()
1247
1255
1248 if self.changelog.tip() == nullid:
1256 if self.changelog.tip() == nullid:
1249 base[nullid] = 1
1257 base[nullid] = 1
1250 if heads != [nullid]:
1258 if heads != [nullid]:
1251 return [nullid]
1259 return [nullid]
1252 return []
1260 return []
1253
1261
1254 # assume we're closer to the tip than the root
1262 # assume we're closer to the tip than the root
1255 # and start by examining the heads
1263 # and start by examining the heads
1256 self.ui.status(_("searching for changes\n"))
1264 self.ui.status(_("searching for changes\n"))
1257
1265
1258 unknown = []
1266 unknown = []
1259 for h in heads:
1267 for h in heads:
1260 if h not in m:
1268 if h not in m:
1261 unknown.append(h)
1269 unknown.append(h)
1262 else:
1270 else:
1263 base[h] = 1
1271 base[h] = 1
1264
1272
1265 if not unknown:
1273 if not unknown:
1266 return []
1274 return []
1267
1275
1268 req = dict.fromkeys(unknown)
1276 req = dict.fromkeys(unknown)
1269 reqcnt = 0
1277 reqcnt = 0
1270
1278
1271 # search through remote branches
1279 # search through remote branches
1272 # a 'branch' here is a linear segment of history, with four parts:
1280 # a 'branch' here is a linear segment of history, with four parts:
1273 # head, root, first parent, second parent
1281 # head, root, first parent, second parent
1274 # (a branch always has two parents (or none) by definition)
1282 # (a branch always has two parents (or none) by definition)
1275 unknown = remote.branches(unknown)
1283 unknown = remote.branches(unknown)
1276 while unknown:
1284 while unknown:
1277 r = []
1285 r = []
1278 while unknown:
1286 while unknown:
1279 n = unknown.pop(0)
1287 n = unknown.pop(0)
1280 if n[0] in seen:
1288 if n[0] in seen:
1281 continue
1289 continue
1282
1290
1283 self.ui.debug(_("examining %s:%s\n")
1291 self.ui.debug(_("examining %s:%s\n")
1284 % (short(n[0]), short(n[1])))
1292 % (short(n[0]), short(n[1])))
1285 if n[0] == nullid: # found the end of the branch
1293 if n[0] == nullid: # found the end of the branch
1286 pass
1294 pass
1287 elif n in seenbranch:
1295 elif n in seenbranch:
1288 self.ui.debug(_("branch already found\n"))
1296 self.ui.debug(_("branch already found\n"))
1289 continue
1297 continue
1290 elif n[1] and n[1] in m: # do we know the base?
1298 elif n[1] and n[1] in m: # do we know the base?
1291 self.ui.debug(_("found incomplete branch %s:%s\n")
1299 self.ui.debug(_("found incomplete branch %s:%s\n")
1292 % (short(n[0]), short(n[1])))
1300 % (short(n[0]), short(n[1])))
1293 search.append(n) # schedule branch range for scanning
1301 search.append(n) # schedule branch range for scanning
1294 seenbranch[n] = 1
1302 seenbranch[n] = 1
1295 else:
1303 else:
1296 if n[1] not in seen and n[1] not in fetch:
1304 if n[1] not in seen and n[1] not in fetch:
1297 if n[2] in m and n[3] in m:
1305 if n[2] in m and n[3] in m:
1298 self.ui.debug(_("found new changeset %s\n") %
1306 self.ui.debug(_("found new changeset %s\n") %
1299 short(n[1]))
1307 short(n[1]))
1300 fetch[n[1]] = 1 # earliest unknown
1308 fetch[n[1]] = 1 # earliest unknown
1301 for p in n[2:4]:
1309 for p in n[2:4]:
1302 if p in m:
1310 if p in m:
1303 base[p] = 1 # latest known
1311 base[p] = 1 # latest known
1304
1312
1305 for p in n[2:4]:
1313 for p in n[2:4]:
1306 if p not in req and p not in m:
1314 if p not in req and p not in m:
1307 r.append(p)
1315 r.append(p)
1308 req[p] = 1
1316 req[p] = 1
1309 seen[n[0]] = 1
1317 seen[n[0]] = 1
1310
1318
1311 if r:
1319 if r:
1312 reqcnt += 1
1320 reqcnt += 1
1313 self.ui.debug(_("request %d: %s\n") %
1321 self.ui.debug(_("request %d: %s\n") %
1314 (reqcnt, " ".join(map(short, r))))
1322 (reqcnt, " ".join(map(short, r))))
1315 for p in xrange(0, len(r), 10):
1323 for p in xrange(0, len(r), 10):
1316 for b in remote.branches(r[p:p+10]):
1324 for b in remote.branches(r[p:p+10]):
1317 self.ui.debug(_("received %s:%s\n") %
1325 self.ui.debug(_("received %s:%s\n") %
1318 (short(b[0]), short(b[1])))
1326 (short(b[0]), short(b[1])))
1319 unknown.append(b)
1327 unknown.append(b)
1320
1328
1321 # do binary search on the branches we found
1329 # do binary search on the branches we found
1322 while search:
1330 while search:
1323 n = search.pop(0)
1331 n = search.pop(0)
1324 reqcnt += 1
1332 reqcnt += 1
1325 l = remote.between([(n[0], n[1])])[0]
1333 l = remote.between([(n[0], n[1])])[0]
1326 l.append(n[1])
1334 l.append(n[1])
1327 p = n[0]
1335 p = n[0]
1328 f = 1
1336 f = 1
1329 for i in l:
1337 for i in l:
1330 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1338 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1331 if i in m:
1339 if i in m:
1332 if f <= 2:
1340 if f <= 2:
1333 self.ui.debug(_("found new branch changeset %s\n") %
1341 self.ui.debug(_("found new branch changeset %s\n") %
1334 short(p))
1342 short(p))
1335 fetch[p] = 1
1343 fetch[p] = 1
1336 base[i] = 1
1344 base[i] = 1
1337 else:
1345 else:
1338 self.ui.debug(_("narrowed branch search to %s:%s\n")
1346 self.ui.debug(_("narrowed branch search to %s:%s\n")
1339 % (short(p), short(i)))
1347 % (short(p), short(i)))
1340 search.append((p, i))
1348 search.append((p, i))
1341 break
1349 break
1342 p, f = i, f * 2
1350 p, f = i, f * 2
1343
1351
1344 # sanity check our fetch list
1352 # sanity check our fetch list
1345 for f in fetch.keys():
1353 for f in fetch.keys():
1346 if f in m:
1354 if f in m:
1347 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1355 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1348
1356
1349 if base.keys() == [nullid]:
1357 if base.keys() == [nullid]:
1350 if force:
1358 if force:
1351 self.ui.warn(_("warning: repository is unrelated\n"))
1359 self.ui.warn(_("warning: repository is unrelated\n"))
1352 else:
1360 else:
1353 raise util.Abort(_("repository is unrelated"))
1361 raise util.Abort(_("repository is unrelated"))
1354
1362
1355 self.ui.debug(_("found new changesets starting at ") +
1363 self.ui.debug(_("found new changesets starting at ") +
1356 " ".join([short(f) for f in fetch]) + "\n")
1364 " ".join([short(f) for f in fetch]) + "\n")
1357
1365
1358 self.ui.debug(_("%d total queries\n") % reqcnt)
1366 self.ui.debug(_("%d total queries\n") % reqcnt)
1359
1367
1360 return fetch.keys()
1368 return fetch.keys()
1361
1369
1362 def findoutgoing(self, remote, base=None, heads=None, force=False):
1370 def findoutgoing(self, remote, base=None, heads=None, force=False):
1363 """Return list of nodes that are roots of subsets not in remote
1371 """Return list of nodes that are roots of subsets not in remote
1364
1372
1365 If base dict is specified, assume that these nodes and their parents
1373 If base dict is specified, assume that these nodes and their parents
1366 exist on the remote side.
1374 exist on the remote side.
1367 If a list of heads is specified, return only nodes which are heads
1375 If a list of heads is specified, return only nodes which are heads
1368 or ancestors of these heads, and return a second element which
1376 or ancestors of these heads, and return a second element which
1369 contains all remote heads which get new children.
1377 contains all remote heads which get new children.
1370 """
1378 """
1371 if base == None:
1379 if base == None:
1372 base = {}
1380 base = {}
1373 self.findincoming(remote, base, heads, force=force)
1381 self.findincoming(remote, base, heads, force=force)
1374
1382
1375 self.ui.debug(_("common changesets up to ")
1383 self.ui.debug(_("common changesets up to ")
1376 + " ".join(map(short, base.keys())) + "\n")
1384 + " ".join(map(short, base.keys())) + "\n")
1377
1385
1378 remain = dict.fromkeys(self.changelog.nodemap)
1386 remain = dict.fromkeys(self.changelog.nodemap)
1379
1387
1380 # prune everything remote has from the tree
1388 # prune everything remote has from the tree
1381 del remain[nullid]
1389 del remain[nullid]
1382 remove = base.keys()
1390 remove = base.keys()
1383 while remove:
1391 while remove:
1384 n = remove.pop(0)
1392 n = remove.pop(0)
1385 if n in remain:
1393 if n in remain:
1386 del remain[n]
1394 del remain[n]
1387 for p in self.changelog.parents(n):
1395 for p in self.changelog.parents(n):
1388 remove.append(p)
1396 remove.append(p)
1389
1397
1390 # find every node whose parents have been pruned
1398 # find every node whose parents have been pruned
1391 subset = []
1399 subset = []
1392 # find every remote head that will get new children
1400 # find every remote head that will get new children
1393 updated_heads = {}
1401 updated_heads = {}
1394 for n in remain:
1402 for n in remain:
1395 p1, p2 = self.changelog.parents(n)
1403 p1, p2 = self.changelog.parents(n)
1396 if p1 not in remain and p2 not in remain:
1404 if p1 not in remain and p2 not in remain:
1397 subset.append(n)
1405 subset.append(n)
1398 if heads:
1406 if heads:
1399 if p1 in heads:
1407 if p1 in heads:
1400 updated_heads[p1] = True
1408 updated_heads[p1] = True
1401 if p2 in heads:
1409 if p2 in heads:
1402 updated_heads[p2] = True
1410 updated_heads[p2] = True
1403
1411
1404 # this is the set of all roots we have to push
1412 # this is the set of all roots we have to push
1405 if heads:
1413 if heads:
1406 return subset, updated_heads.keys()
1414 return subset, updated_heads.keys()
1407 else:
1415 else:
1408 return subset
1416 return subset
1409
1417
1410 def pull(self, remote, heads=None, force=False):
1418 def pull(self, remote, heads=None, force=False):
1411 lock = self.lock()
1419 lock = self.lock()
1412 try:
1420 try:
1413 fetch = self.findincoming(remote, heads=heads, force=force)
1421 fetch = self.findincoming(remote, heads=heads, force=force)
1414 if fetch == [nullid]:
1422 if fetch == [nullid]:
1415 self.ui.status(_("requesting all changes\n"))
1423 self.ui.status(_("requesting all changes\n"))
1416
1424
1417 if not fetch:
1425 if not fetch:
1418 self.ui.status(_("no changes found\n"))
1426 self.ui.status(_("no changes found\n"))
1419 return 0
1427 return 0
1420
1428
1421 if heads is None:
1429 if heads is None:
1422 cg = remote.changegroup(fetch, 'pull')
1430 cg = remote.changegroup(fetch, 'pull')
1423 else:
1431 else:
1424 if 'changegroupsubset' not in remote.capabilities:
1432 if 'changegroupsubset' not in remote.capabilities:
1425 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1433 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1426 cg = remote.changegroupsubset(fetch, heads, 'pull')
1434 cg = remote.changegroupsubset(fetch, heads, 'pull')
1427 return self.addchangegroup(cg, 'pull', remote.url())
1435 return self.addchangegroup(cg, 'pull', remote.url())
1428 finally:
1436 finally:
1429 del lock
1437 del lock
1430
1438
1431 def push(self, remote, force=False, revs=None):
1439 def push(self, remote, force=False, revs=None):
1432 # there are two ways to push to remote repo:
1440 # there are two ways to push to remote repo:
1433 #
1441 #
1434 # addchangegroup assumes local user can lock remote
1442 # addchangegroup assumes local user can lock remote
1435 # repo (local filesystem, old ssh servers).
1443 # repo (local filesystem, old ssh servers).
1436 #
1444 #
1437 # unbundle assumes local user cannot lock remote repo (new ssh
1445 # unbundle assumes local user cannot lock remote repo (new ssh
1438 # servers, http servers).
1446 # servers, http servers).
1439
1447
1440 if remote.capable('unbundle'):
1448 if remote.capable('unbundle'):
1441 return self.push_unbundle(remote, force, revs)
1449 return self.push_unbundle(remote, force, revs)
1442 return self.push_addchangegroup(remote, force, revs)
1450 return self.push_addchangegroup(remote, force, revs)
1443
1451
1444 def prepush(self, remote, force, revs):
1452 def prepush(self, remote, force, revs):
1445 base = {}
1453 base = {}
1446 remote_heads = remote.heads()
1454 remote_heads = remote.heads()
1447 inc = self.findincoming(remote, base, remote_heads, force=force)
1455 inc = self.findincoming(remote, base, remote_heads, force=force)
1448
1456
1449 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1457 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1450 if revs is not None:
1458 if revs is not None:
1451 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1459 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1452 else:
1460 else:
1453 bases, heads = update, self.changelog.heads()
1461 bases, heads = update, self.changelog.heads()
1454
1462
1455 if not bases:
1463 if not bases:
1456 self.ui.status(_("no changes found\n"))
1464 self.ui.status(_("no changes found\n"))
1457 return None, 1
1465 return None, 1
1458 elif not force:
1466 elif not force:
1459 # check if we're creating new remote heads
1467 # check if we're creating new remote heads
1460 # to be a remote head after push, node must be either
1468 # to be a remote head after push, node must be either
1461 # - unknown locally
1469 # - unknown locally
1462 # - a local outgoing head descended from update
1470 # - a local outgoing head descended from update
1463 # - a remote head that's known locally and not
1471 # - a remote head that's known locally and not
1464 # ancestral to an outgoing head
1472 # ancestral to an outgoing head
1465
1473
1466 warn = 0
1474 warn = 0
1467
1475
1468 if remote_heads == [nullid]:
1476 if remote_heads == [nullid]:
1469 warn = 0
1477 warn = 0
1470 elif not revs and len(heads) > len(remote_heads):
1478 elif not revs and len(heads) > len(remote_heads):
1471 warn = 1
1479 warn = 1
1472 else:
1480 else:
1473 newheads = list(heads)
1481 newheads = list(heads)
1474 for r in remote_heads:
1482 for r in remote_heads:
1475 if r in self.changelog.nodemap:
1483 if r in self.changelog.nodemap:
1476 desc = self.changelog.heads(r, heads)
1484 desc = self.changelog.heads(r, heads)
1477 l = [h for h in heads if h in desc]
1485 l = [h for h in heads if h in desc]
1478 if not l:
1486 if not l:
1479 newheads.append(r)
1487 newheads.append(r)
1480 else:
1488 else:
1481 newheads.append(r)
1489 newheads.append(r)
1482 if len(newheads) > len(remote_heads):
1490 if len(newheads) > len(remote_heads):
1483 warn = 1
1491 warn = 1
1484
1492
1485 if warn:
1493 if warn:
1486 self.ui.warn(_("abort: push creates new remote heads!\n"))
1494 self.ui.warn(_("abort: push creates new remote heads!\n"))
1487 self.ui.status(_("(did you forget to merge?"
1495 self.ui.status(_("(did you forget to merge?"
1488 " use push -f to force)\n"))
1496 " use push -f to force)\n"))
1489 return None, 0
1497 return None, 0
1490 elif inc:
1498 elif inc:
1491 self.ui.warn(_("note: unsynced remote changes!\n"))
1499 self.ui.warn(_("note: unsynced remote changes!\n"))
1492
1500
1493
1501
1494 if revs is None:
1502 if revs is None:
1495 cg = self.changegroup(update, 'push')
1503 cg = self.changegroup(update, 'push')
1496 else:
1504 else:
1497 cg = self.changegroupsubset(update, revs, 'push')
1505 cg = self.changegroupsubset(update, revs, 'push')
1498 return cg, remote_heads
1506 return cg, remote_heads
1499
1507
1500 def push_addchangegroup(self, remote, force, revs):
1508 def push_addchangegroup(self, remote, force, revs):
1501 lock = remote.lock()
1509 lock = remote.lock()
1502 try:
1510 try:
1503 ret = self.prepush(remote, force, revs)
1511 ret = self.prepush(remote, force, revs)
1504 if ret[0] is not None:
1512 if ret[0] is not None:
1505 cg, remote_heads = ret
1513 cg, remote_heads = ret
1506 return remote.addchangegroup(cg, 'push', self.url())
1514 return remote.addchangegroup(cg, 'push', self.url())
1507 return ret[1]
1515 return ret[1]
1508 finally:
1516 finally:
1509 del lock
1517 del lock
1510
1518
1511 def push_unbundle(self, remote, force, revs):
1519 def push_unbundle(self, remote, force, revs):
1512 # local repo finds heads on server, finds out what revs it
1520 # local repo finds heads on server, finds out what revs it
1513 # must push. once revs transferred, if server finds it has
1521 # must push. once revs transferred, if server finds it has
1514 # different heads (someone else won commit/push race), server
1522 # different heads (someone else won commit/push race), server
1515 # aborts.
1523 # aborts.
1516
1524
1517 ret = self.prepush(remote, force, revs)
1525 ret = self.prepush(remote, force, revs)
1518 if ret[0] is not None:
1526 if ret[0] is not None:
1519 cg, remote_heads = ret
1527 cg, remote_heads = ret
1520 if force: remote_heads = ['force']
1528 if force: remote_heads = ['force']
1521 return remote.unbundle(cg, remote_heads, 'push')
1529 return remote.unbundle(cg, remote_heads, 'push')
1522 return ret[1]
1530 return ret[1]
1523
1531
1524 def changegroupinfo(self, nodes, source):
1532 def changegroupinfo(self, nodes, source):
1525 if self.ui.verbose or source == 'bundle':
1533 if self.ui.verbose or source == 'bundle':
1526 self.ui.status(_("%d changesets found\n") % len(nodes))
1534 self.ui.status(_("%d changesets found\n") % len(nodes))
1527 if self.ui.debugflag:
1535 if self.ui.debugflag:
1528 self.ui.debug(_("List of changesets:\n"))
1536 self.ui.debug(_("List of changesets:\n"))
1529 for node in nodes:
1537 for node in nodes:
1530 self.ui.debug("%s\n" % hex(node))
1538 self.ui.debug("%s\n" % hex(node))
1531
1539
1532 def changegroupsubset(self, bases, heads, source, extranodes=None):
1540 def changegroupsubset(self, bases, heads, source, extranodes=None):
1533 """This function generates a changegroup consisting of all the nodes
1541 """This function generates a changegroup consisting of all the nodes
1534 that are descendents of any of the bases, and ancestors of any of
1542 that are descendents of any of the bases, and ancestors of any of
1535 the heads.
1543 the heads.
1536
1544
1537 It is fairly complex as determining which filenodes and which
1545 It is fairly complex as determining which filenodes and which
1538 manifest nodes need to be included for the changeset to be complete
1546 manifest nodes need to be included for the changeset to be complete
1539 is non-trivial.
1547 is non-trivial.
1540
1548
1541 Another wrinkle is doing the reverse, figuring out which changeset in
1549 Another wrinkle is doing the reverse, figuring out which changeset in
1542 the changegroup a particular filenode or manifestnode belongs to.
1550 the changegroup a particular filenode or manifestnode belongs to.
1543
1551
1544 The caller can specify some nodes that must be included in the
1552 The caller can specify some nodes that must be included in the
1545 changegroup using the extranodes argument. It should be a dict
1553 changegroup using the extranodes argument. It should be a dict
1546 where the keys are the filenames (or 1 for the manifest), and the
1554 where the keys are the filenames (or 1 for the manifest), and the
1547 values are lists of (node, linknode) tuples, where node is a wanted
1555 values are lists of (node, linknode) tuples, where node is a wanted
1548 node and linknode is the changelog node that should be transmitted as
1556 node and linknode is the changelog node that should be transmitted as
1549 the linkrev.
1557 the linkrev.
1550 """
1558 """
1551
1559
1552 self.hook('preoutgoing', throw=True, source=source)
1560 self.hook('preoutgoing', throw=True, source=source)
1553
1561
1554 # Set up some initial variables
1562 # Set up some initial variables
1555 # Make it easy to refer to self.changelog
1563 # Make it easy to refer to self.changelog
1556 cl = self.changelog
1564 cl = self.changelog
1557 # msng is short for missing - compute the list of changesets in this
1565 # msng is short for missing - compute the list of changesets in this
1558 # changegroup.
1566 # changegroup.
1559 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1567 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1560 self.changegroupinfo(msng_cl_lst, source)
1568 self.changegroupinfo(msng_cl_lst, source)
1561 # Some bases may turn out to be superfluous, and some heads may be
1569 # Some bases may turn out to be superfluous, and some heads may be
1562 # too. nodesbetween will return the minimal set of bases and heads
1570 # too. nodesbetween will return the minimal set of bases and heads
1563 # necessary to re-create the changegroup.
1571 # necessary to re-create the changegroup.
1564
1572
1565 # Known heads are the list of heads that it is assumed the recipient
1573 # Known heads are the list of heads that it is assumed the recipient
1566 # of this changegroup will know about.
1574 # of this changegroup will know about.
1567 knownheads = {}
1575 knownheads = {}
1568 # We assume that all parents of bases are known heads.
1576 # We assume that all parents of bases are known heads.
1569 for n in bases:
1577 for n in bases:
1570 for p in cl.parents(n):
1578 for p in cl.parents(n):
1571 if p != nullid:
1579 if p != nullid:
1572 knownheads[p] = 1
1580 knownheads[p] = 1
1573 knownheads = knownheads.keys()
1581 knownheads = knownheads.keys()
1574 if knownheads:
1582 if knownheads:
1575 # Now that we know what heads are known, we can compute which
1583 # Now that we know what heads are known, we can compute which
1576 # changesets are known. The recipient must know about all
1584 # changesets are known. The recipient must know about all
1577 # changesets required to reach the known heads from the null
1585 # changesets required to reach the known heads from the null
1578 # changeset.
1586 # changeset.
1579 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1587 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1580 junk = None
1588 junk = None
1581 # Transform the list into an ersatz set.
1589 # Transform the list into an ersatz set.
1582 has_cl_set = dict.fromkeys(has_cl_set)
1590 has_cl_set = dict.fromkeys(has_cl_set)
1583 else:
1591 else:
1584 # If there were no known heads, the recipient cannot be assumed to
1592 # If there were no known heads, the recipient cannot be assumed to
1585 # know about any changesets.
1593 # know about any changesets.
1586 has_cl_set = {}
1594 has_cl_set = {}
1587
1595
1588 # Make it easy to refer to self.manifest
1596 # Make it easy to refer to self.manifest
1589 mnfst = self.manifest
1597 mnfst = self.manifest
1590 # We don't know which manifests are missing yet
1598 # We don't know which manifests are missing yet
1591 msng_mnfst_set = {}
1599 msng_mnfst_set = {}
1592 # Nor do we know which filenodes are missing.
1600 # Nor do we know which filenodes are missing.
1593 msng_filenode_set = {}
1601 msng_filenode_set = {}
1594
1602
1595 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1603 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1596 junk = None
1604 junk = None
1597
1605
1598 # A changeset always belongs to itself, so the changenode lookup
1606 # A changeset always belongs to itself, so the changenode lookup
1599 # function for a changenode is identity.
1607 # function for a changenode is identity.
1600 def identity(x):
1608 def identity(x):
1601 return x
1609 return x
1602
1610
1603 # A function generating function. Sets up an environment for the
1611 # A function generating function. Sets up an environment for the
1604 # inner function.
1612 # inner function.
1605 def cmp_by_rev_func(revlog):
1613 def cmp_by_rev_func(revlog):
1606 # Compare two nodes by their revision number in the environment's
1614 # Compare two nodes by their revision number in the environment's
1607 # revision history. Since the revision number both represents the
1615 # revision history. Since the revision number both represents the
1608 # most efficient order to read the nodes in, and represents a
1616 # most efficient order to read the nodes in, and represents a
1609 # topological sorting of the nodes, this function is often useful.
1617 # topological sorting of the nodes, this function is often useful.
1610 def cmp_by_rev(a, b):
1618 def cmp_by_rev(a, b):
1611 return cmp(revlog.rev(a), revlog.rev(b))
1619 return cmp(revlog.rev(a), revlog.rev(b))
1612 return cmp_by_rev
1620 return cmp_by_rev
1613
1621
1614 # If we determine that a particular file or manifest node must be a
1622 # If we determine that a particular file or manifest node must be a
1615 # node that the recipient of the changegroup will already have, we can
1623 # node that the recipient of the changegroup will already have, we can
1616 # also assume the recipient will have all the parents. This function
1624 # also assume the recipient will have all the parents. This function
1617 # prunes them from the set of missing nodes.
1625 # prunes them from the set of missing nodes.
1618 def prune_parents(revlog, hasset, msngset):
1626 def prune_parents(revlog, hasset, msngset):
1619 haslst = hasset.keys()
1627 haslst = hasset.keys()
1620 haslst.sort(cmp_by_rev_func(revlog))
1628 haslst.sort(cmp_by_rev_func(revlog))
1621 for node in haslst:
1629 for node in haslst:
1622 parentlst = [p for p in revlog.parents(node) if p != nullid]
1630 parentlst = [p for p in revlog.parents(node) if p != nullid]
1623 while parentlst:
1631 while parentlst:
1624 n = parentlst.pop()
1632 n = parentlst.pop()
1625 if n not in hasset:
1633 if n not in hasset:
1626 hasset[n] = 1
1634 hasset[n] = 1
1627 p = [p for p in revlog.parents(n) if p != nullid]
1635 p = [p for p in revlog.parents(n) if p != nullid]
1628 parentlst.extend(p)
1636 parentlst.extend(p)
1629 for n in hasset:
1637 for n in hasset:
1630 msngset.pop(n, None)
1638 msngset.pop(n, None)
1631
1639
1632 # This is a function generating function used to set up an environment
1640 # This is a function generating function used to set up an environment
1633 # for the inner function to execute in.
1641 # for the inner function to execute in.
1634 def manifest_and_file_collector(changedfileset):
1642 def manifest_and_file_collector(changedfileset):
1635 # This is an information gathering function that gathers
1643 # This is an information gathering function that gathers
1636 # information from each changeset node that goes out as part of
1644 # information from each changeset node that goes out as part of
1637 # the changegroup. The information gathered is a list of which
1645 # the changegroup. The information gathered is a list of which
1638 # manifest nodes are potentially required (the recipient may
1646 # manifest nodes are potentially required (the recipient may
1639 # already have them) and total list of all files which were
1647 # already have them) and total list of all files which were
1640 # changed in any changeset in the changegroup.
1648 # changed in any changeset in the changegroup.
1641 #
1649 #
1642 # We also remember the first changenode we saw any manifest
1650 # We also remember the first changenode we saw any manifest
1643 # referenced by so we can later determine which changenode 'owns'
1651 # referenced by so we can later determine which changenode 'owns'
1644 # the manifest.
1652 # the manifest.
1645 def collect_manifests_and_files(clnode):
1653 def collect_manifests_and_files(clnode):
1646 c = cl.read(clnode)
1654 c = cl.read(clnode)
1647 for f in c[3]:
1655 for f in c[3]:
1648 # This is to make sure we only have one instance of each
1656 # This is to make sure we only have one instance of each
1649 # filename string for each filename.
1657 # filename string for each filename.
1650 changedfileset.setdefault(f, f)
1658 changedfileset.setdefault(f, f)
1651 msng_mnfst_set.setdefault(c[0], clnode)
1659 msng_mnfst_set.setdefault(c[0], clnode)
1652 return collect_manifests_and_files
1660 return collect_manifests_and_files
1653
1661
1654 # Figure out which manifest nodes (of the ones we think might be part
1662 # Figure out which manifest nodes (of the ones we think might be part
1655 # of the changegroup) the recipient must know about and remove them
1663 # of the changegroup) the recipient must know about and remove them
1656 # from the changegroup.
1664 # from the changegroup.
1657 def prune_manifests():
1665 def prune_manifests():
1658 has_mnfst_set = {}
1666 has_mnfst_set = {}
1659 for n in msng_mnfst_set:
1667 for n in msng_mnfst_set:
1660 # If a 'missing' manifest thinks it belongs to a changenode
1668 # If a 'missing' manifest thinks it belongs to a changenode
1661 # the recipient is assumed to have, obviously the recipient
1669 # the recipient is assumed to have, obviously the recipient
1662 # must have that manifest.
1670 # must have that manifest.
1663 linknode = cl.node(mnfst.linkrev(n))
1671 linknode = cl.node(mnfst.linkrev(n))
1664 if linknode in has_cl_set:
1672 if linknode in has_cl_set:
1665 has_mnfst_set[n] = 1
1673 has_mnfst_set[n] = 1
1666 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1674 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1667
1675
1668 # Use the information collected in collect_manifests_and_files to say
1676 # Use the information collected in collect_manifests_and_files to say
1669 # which changenode any manifestnode belongs to.
1677 # which changenode any manifestnode belongs to.
1670 def lookup_manifest_link(mnfstnode):
1678 def lookup_manifest_link(mnfstnode):
1671 return msng_mnfst_set[mnfstnode]
1679 return msng_mnfst_set[mnfstnode]
1672
1680
1673 # A function generating function that sets up the initial environment
1681 # A function generating function that sets up the initial environment
1674 # the inner function.
1682 # the inner function.
1675 def filenode_collector(changedfiles):
1683 def filenode_collector(changedfiles):
1676 next_rev = [0]
1684 next_rev = [0]
1677 # This gathers information from each manifestnode included in the
1685 # This gathers information from each manifestnode included in the
1678 # changegroup about which filenodes the manifest node references
1686 # changegroup about which filenodes the manifest node references
1679 # so we can include those in the changegroup too.
1687 # so we can include those in the changegroup too.
1680 #
1688 #
1681 # It also remembers which changenode each filenode belongs to. It
1689 # It also remembers which changenode each filenode belongs to. It
1682 # does this by assuming the a filenode belongs to the changenode
1690 # does this by assuming the a filenode belongs to the changenode
1683 # the first manifest that references it belongs to.
1691 # the first manifest that references it belongs to.
1684 def collect_msng_filenodes(mnfstnode):
1692 def collect_msng_filenodes(mnfstnode):
1685 r = mnfst.rev(mnfstnode)
1693 r = mnfst.rev(mnfstnode)
1686 if r == next_rev[0]:
1694 if r == next_rev[0]:
1687 # If the last rev we looked at was the one just previous,
1695 # If the last rev we looked at was the one just previous,
1688 # we only need to see a diff.
1696 # we only need to see a diff.
1689 deltamf = mnfst.readdelta(mnfstnode)
1697 deltamf = mnfst.readdelta(mnfstnode)
1690 # For each line in the delta
1698 # For each line in the delta
1691 for f, fnode in deltamf.items():
1699 for f, fnode in deltamf.items():
1692 f = changedfiles.get(f, None)
1700 f = changedfiles.get(f, None)
1693 # And if the file is in the list of files we care
1701 # And if the file is in the list of files we care
1694 # about.
1702 # about.
1695 if f is not None:
1703 if f is not None:
1696 # Get the changenode this manifest belongs to
1704 # Get the changenode this manifest belongs to
1697 clnode = msng_mnfst_set[mnfstnode]
1705 clnode = msng_mnfst_set[mnfstnode]
1698 # Create the set of filenodes for the file if
1706 # Create the set of filenodes for the file if
1699 # there isn't one already.
1707 # there isn't one already.
1700 ndset = msng_filenode_set.setdefault(f, {})
1708 ndset = msng_filenode_set.setdefault(f, {})
1701 # And set the filenode's changelog node to the
1709 # And set the filenode's changelog node to the
1702 # manifest's if it hasn't been set already.
1710 # manifest's if it hasn't been set already.
1703 ndset.setdefault(fnode, clnode)
1711 ndset.setdefault(fnode, clnode)
1704 else:
1712 else:
1705 # Otherwise we need a full manifest.
1713 # Otherwise we need a full manifest.
1706 m = mnfst.read(mnfstnode)
1714 m = mnfst.read(mnfstnode)
1707 # For every file in we care about.
1715 # For every file in we care about.
1708 for f in changedfiles:
1716 for f in changedfiles:
1709 fnode = m.get(f, None)
1717 fnode = m.get(f, None)
1710 # If it's in the manifest
1718 # If it's in the manifest
1711 if fnode is not None:
1719 if fnode is not None:
1712 # See comments above.
1720 # See comments above.
1713 clnode = msng_mnfst_set[mnfstnode]
1721 clnode = msng_mnfst_set[mnfstnode]
1714 ndset = msng_filenode_set.setdefault(f, {})
1722 ndset = msng_filenode_set.setdefault(f, {})
1715 ndset.setdefault(fnode, clnode)
1723 ndset.setdefault(fnode, clnode)
1716 # Remember the revision we hope to see next.
1724 # Remember the revision we hope to see next.
1717 next_rev[0] = r + 1
1725 next_rev[0] = r + 1
1718 return collect_msng_filenodes
1726 return collect_msng_filenodes
1719
1727
1720 # We have a list of filenodes we think we need for a file, lets remove
1728 # We have a list of filenodes we think we need for a file, lets remove
1721 # all those we now the recipient must have.
1729 # all those we now the recipient must have.
1722 def prune_filenodes(f, filerevlog):
1730 def prune_filenodes(f, filerevlog):
1723 msngset = msng_filenode_set[f]
1731 msngset = msng_filenode_set[f]
1724 hasset = {}
1732 hasset = {}
1725 # If a 'missing' filenode thinks it belongs to a changenode we
1733 # If a 'missing' filenode thinks it belongs to a changenode we
1726 # assume the recipient must have, then the recipient must have
1734 # assume the recipient must have, then the recipient must have
1727 # that filenode.
1735 # that filenode.
1728 for n in msngset:
1736 for n in msngset:
1729 clnode = cl.node(filerevlog.linkrev(n))
1737 clnode = cl.node(filerevlog.linkrev(n))
1730 if clnode in has_cl_set:
1738 if clnode in has_cl_set:
1731 hasset[n] = 1
1739 hasset[n] = 1
1732 prune_parents(filerevlog, hasset, msngset)
1740 prune_parents(filerevlog, hasset, msngset)
1733
1741
1734 # A function generator function that sets up the a context for the
1742 # A function generator function that sets up the a context for the
1735 # inner function.
1743 # inner function.
1736 def lookup_filenode_link_func(fname):
1744 def lookup_filenode_link_func(fname):
1737 msngset = msng_filenode_set[fname]
1745 msngset = msng_filenode_set[fname]
1738 # Lookup the changenode the filenode belongs to.
1746 # Lookup the changenode the filenode belongs to.
1739 def lookup_filenode_link(fnode):
1747 def lookup_filenode_link(fnode):
1740 return msngset[fnode]
1748 return msngset[fnode]
1741 return lookup_filenode_link
1749 return lookup_filenode_link
1742
1750
1743 # Add the nodes that were explicitly requested.
1751 # Add the nodes that were explicitly requested.
1744 def add_extra_nodes(name, nodes):
1752 def add_extra_nodes(name, nodes):
1745 if not extranodes or name not in extranodes:
1753 if not extranodes or name not in extranodes:
1746 return
1754 return
1747
1755
1748 for node, linknode in extranodes[name]:
1756 for node, linknode in extranodes[name]:
1749 if node not in nodes:
1757 if node not in nodes:
1750 nodes[node] = linknode
1758 nodes[node] = linknode
1751
1759
1752 # Now that we have all theses utility functions to help out and
1760 # Now that we have all theses utility functions to help out and
1753 # logically divide up the task, generate the group.
1761 # logically divide up the task, generate the group.
1754 def gengroup():
1762 def gengroup():
1755 # The set of changed files starts empty.
1763 # The set of changed files starts empty.
1756 changedfiles = {}
1764 changedfiles = {}
1757 # Create a changenode group generator that will call our functions
1765 # Create a changenode group generator that will call our functions
1758 # back to lookup the owning changenode and collect information.
1766 # back to lookup the owning changenode and collect information.
1759 group = cl.group(msng_cl_lst, identity,
1767 group = cl.group(msng_cl_lst, identity,
1760 manifest_and_file_collector(changedfiles))
1768 manifest_and_file_collector(changedfiles))
1761 for chnk in group:
1769 for chnk in group:
1762 yield chnk
1770 yield chnk
1763
1771
1764 # The list of manifests has been collected by the generator
1772 # The list of manifests has been collected by the generator
1765 # calling our functions back.
1773 # calling our functions back.
1766 prune_manifests()
1774 prune_manifests()
1767 add_extra_nodes(1, msng_mnfst_set)
1775 add_extra_nodes(1, msng_mnfst_set)
1768 msng_mnfst_lst = msng_mnfst_set.keys()
1776 msng_mnfst_lst = msng_mnfst_set.keys()
1769 # Sort the manifestnodes by revision number.
1777 # Sort the manifestnodes by revision number.
1770 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1778 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1771 # Create a generator for the manifestnodes that calls our lookup
1779 # Create a generator for the manifestnodes that calls our lookup
1772 # and data collection functions back.
1780 # and data collection functions back.
1773 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1781 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1774 filenode_collector(changedfiles))
1782 filenode_collector(changedfiles))
1775 for chnk in group:
1783 for chnk in group:
1776 yield chnk
1784 yield chnk
1777
1785
1778 # These are no longer needed, dereference and toss the memory for
1786 # These are no longer needed, dereference and toss the memory for
1779 # them.
1787 # them.
1780 msng_mnfst_lst = None
1788 msng_mnfst_lst = None
1781 msng_mnfst_set.clear()
1789 msng_mnfst_set.clear()
1782
1790
1783 if extranodes:
1791 if extranodes:
1784 for fname in extranodes:
1792 for fname in extranodes:
1785 if isinstance(fname, int):
1793 if isinstance(fname, int):
1786 continue
1794 continue
1787 add_extra_nodes(fname,
1795 add_extra_nodes(fname,
1788 msng_filenode_set.setdefault(fname, {}))
1796 msng_filenode_set.setdefault(fname, {}))
1789 changedfiles[fname] = 1
1797 changedfiles[fname] = 1
1790 # Go through all our files in order sorted by name.
1798 # Go through all our files in order sorted by name.
1791 for fname in util.sort(changedfiles):
1799 for fname in util.sort(changedfiles):
1792 filerevlog = self.file(fname)
1800 filerevlog = self.file(fname)
1793 if not len(filerevlog):
1801 if not len(filerevlog):
1794 raise util.Abort(_("empty or missing revlog for %s") % fname)
1802 raise util.Abort(_("empty or missing revlog for %s") % fname)
1795 # Toss out the filenodes that the recipient isn't really
1803 # Toss out the filenodes that the recipient isn't really
1796 # missing.
1804 # missing.
1797 if fname in msng_filenode_set:
1805 if fname in msng_filenode_set:
1798 prune_filenodes(fname, filerevlog)
1806 prune_filenodes(fname, filerevlog)
1799 msng_filenode_lst = msng_filenode_set[fname].keys()
1807 msng_filenode_lst = msng_filenode_set[fname].keys()
1800 else:
1808 else:
1801 msng_filenode_lst = []
1809 msng_filenode_lst = []
1802 # If any filenodes are left, generate the group for them,
1810 # If any filenodes are left, generate the group for them,
1803 # otherwise don't bother.
1811 # otherwise don't bother.
1804 if len(msng_filenode_lst) > 0:
1812 if len(msng_filenode_lst) > 0:
1805 yield changegroup.chunkheader(len(fname))
1813 yield changegroup.chunkheader(len(fname))
1806 yield fname
1814 yield fname
1807 # Sort the filenodes by their revision #
1815 # Sort the filenodes by their revision #
1808 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1816 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1809 # Create a group generator and only pass in a changenode
1817 # Create a group generator and only pass in a changenode
1810 # lookup function as we need to collect no information
1818 # lookup function as we need to collect no information
1811 # from filenodes.
1819 # from filenodes.
1812 group = filerevlog.group(msng_filenode_lst,
1820 group = filerevlog.group(msng_filenode_lst,
1813 lookup_filenode_link_func(fname))
1821 lookup_filenode_link_func(fname))
1814 for chnk in group:
1822 for chnk in group:
1815 yield chnk
1823 yield chnk
1816 if fname in msng_filenode_set:
1824 if fname in msng_filenode_set:
1817 # Don't need this anymore, toss it to free memory.
1825 # Don't need this anymore, toss it to free memory.
1818 del msng_filenode_set[fname]
1826 del msng_filenode_set[fname]
1819 # Signal that no more groups are left.
1827 # Signal that no more groups are left.
1820 yield changegroup.closechunk()
1828 yield changegroup.closechunk()
1821
1829
1822 if msng_cl_lst:
1830 if msng_cl_lst:
1823 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1831 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1824
1832
1825 return util.chunkbuffer(gengroup())
1833 return util.chunkbuffer(gengroup())
1826
1834
1827 def changegroup(self, basenodes, source):
1835 def changegroup(self, basenodes, source):
1828 """Generate a changegroup of all nodes that we have that a recipient
1836 """Generate a changegroup of all nodes that we have that a recipient
1829 doesn't.
1837 doesn't.
1830
1838
1831 This is much easier than the previous function as we can assume that
1839 This is much easier than the previous function as we can assume that
1832 the recipient has any changenode we aren't sending them."""
1840 the recipient has any changenode we aren't sending them."""
1833
1841
1834 self.hook('preoutgoing', throw=True, source=source)
1842 self.hook('preoutgoing', throw=True, source=source)
1835
1843
1836 cl = self.changelog
1844 cl = self.changelog
1837 nodes = cl.nodesbetween(basenodes, None)[0]
1845 nodes = cl.nodesbetween(basenodes, None)[0]
1838 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1846 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1839 self.changegroupinfo(nodes, source)
1847 self.changegroupinfo(nodes, source)
1840
1848
1841 def identity(x):
1849 def identity(x):
1842 return x
1850 return x
1843
1851
1844 def gennodelst(log):
1852 def gennodelst(log):
1845 for r in log:
1853 for r in log:
1846 n = log.node(r)
1854 n = log.node(r)
1847 if log.linkrev(n) in revset:
1855 if log.linkrev(n) in revset:
1848 yield n
1856 yield n
1849
1857
1850 def changed_file_collector(changedfileset):
1858 def changed_file_collector(changedfileset):
1851 def collect_changed_files(clnode):
1859 def collect_changed_files(clnode):
1852 c = cl.read(clnode)
1860 c = cl.read(clnode)
1853 for fname in c[3]:
1861 for fname in c[3]:
1854 changedfileset[fname] = 1
1862 changedfileset[fname] = 1
1855 return collect_changed_files
1863 return collect_changed_files
1856
1864
1857 def lookuprevlink_func(revlog):
1865 def lookuprevlink_func(revlog):
1858 def lookuprevlink(n):
1866 def lookuprevlink(n):
1859 return cl.node(revlog.linkrev(n))
1867 return cl.node(revlog.linkrev(n))
1860 return lookuprevlink
1868 return lookuprevlink
1861
1869
1862 def gengroup():
1870 def gengroup():
1863 # construct a list of all changed files
1871 # construct a list of all changed files
1864 changedfiles = {}
1872 changedfiles = {}
1865
1873
1866 for chnk in cl.group(nodes, identity,
1874 for chnk in cl.group(nodes, identity,
1867 changed_file_collector(changedfiles)):
1875 changed_file_collector(changedfiles)):
1868 yield chnk
1876 yield chnk
1869
1877
1870 mnfst = self.manifest
1878 mnfst = self.manifest
1871 nodeiter = gennodelst(mnfst)
1879 nodeiter = gennodelst(mnfst)
1872 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1880 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1873 yield chnk
1881 yield chnk
1874
1882
1875 for fname in util.sort(changedfiles):
1883 for fname in util.sort(changedfiles):
1876 filerevlog = self.file(fname)
1884 filerevlog = self.file(fname)
1877 if not len(filerevlog):
1885 if not len(filerevlog):
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1886 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 nodeiter = gennodelst(filerevlog)
1887 nodeiter = gennodelst(filerevlog)
1880 nodeiter = list(nodeiter)
1888 nodeiter = list(nodeiter)
1881 if nodeiter:
1889 if nodeiter:
1882 yield changegroup.chunkheader(len(fname))
1890 yield changegroup.chunkheader(len(fname))
1883 yield fname
1891 yield fname
1884 lookup = lookuprevlink_func(filerevlog)
1892 lookup = lookuprevlink_func(filerevlog)
1885 for chnk in filerevlog.group(nodeiter, lookup):
1893 for chnk in filerevlog.group(nodeiter, lookup):
1886 yield chnk
1894 yield chnk
1887
1895
1888 yield changegroup.closechunk()
1896 yield changegroup.closechunk()
1889
1897
1890 if nodes:
1898 if nodes:
1891 self.hook('outgoing', node=hex(nodes[0]), source=source)
1899 self.hook('outgoing', node=hex(nodes[0]), source=source)
1892
1900
1893 return util.chunkbuffer(gengroup())
1901 return util.chunkbuffer(gengroup())
1894
1902
1895 def addchangegroup(self, source, srctype, url, emptyok=False):
1903 def addchangegroup(self, source, srctype, url, emptyok=False):
1896 """add changegroup to repo.
1904 """add changegroup to repo.
1897
1905
1898 return values:
1906 return values:
1899 - nothing changed or no source: 0
1907 - nothing changed or no source: 0
1900 - more heads than before: 1+added heads (2..n)
1908 - more heads than before: 1+added heads (2..n)
1901 - less heads than before: -1-removed heads (-2..-n)
1909 - less heads than before: -1-removed heads (-2..-n)
1902 - number of heads stays the same: 1
1910 - number of heads stays the same: 1
1903 """
1911 """
1904 def csmap(x):
1912 def csmap(x):
1905 self.ui.debug(_("add changeset %s\n") % short(x))
1913 self.ui.debug(_("add changeset %s\n") % short(x))
1906 return len(cl)
1914 return len(cl)
1907
1915
1908 def revmap(x):
1916 def revmap(x):
1909 return cl.rev(x)
1917 return cl.rev(x)
1910
1918
1911 if not source:
1919 if not source:
1912 return 0
1920 return 0
1913
1921
1914 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1922 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1915
1923
1916 changesets = files = revisions = 0
1924 changesets = files = revisions = 0
1917
1925
1918 # write changelog data to temp files so concurrent readers will not see
1926 # write changelog data to temp files so concurrent readers will not see
1919 # inconsistent view
1927 # inconsistent view
1920 cl = self.changelog
1928 cl = self.changelog
1921 cl.delayupdate()
1929 cl.delayupdate()
1922 oldheads = len(cl.heads())
1930 oldheads = len(cl.heads())
1923
1931
1924 tr = self.transaction()
1932 tr = self.transaction()
1925 try:
1933 try:
1926 trp = weakref.proxy(tr)
1934 trp = weakref.proxy(tr)
1927 # pull off the changeset group
1935 # pull off the changeset group
1928 self.ui.status(_("adding changesets\n"))
1936 self.ui.status(_("adding changesets\n"))
1929 cor = len(cl) - 1
1937 cor = len(cl) - 1
1930 chunkiter = changegroup.chunkiter(source)
1938 chunkiter = changegroup.chunkiter(source)
1931 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1939 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1932 raise util.Abort(_("received changelog group is empty"))
1940 raise util.Abort(_("received changelog group is empty"))
1933 cnr = len(cl) - 1
1941 cnr = len(cl) - 1
1934 changesets = cnr - cor
1942 changesets = cnr - cor
1935
1943
1936 # pull off the manifest group
1944 # pull off the manifest group
1937 self.ui.status(_("adding manifests\n"))
1945 self.ui.status(_("adding manifests\n"))
1938 chunkiter = changegroup.chunkiter(source)
1946 chunkiter = changegroup.chunkiter(source)
1939 # no need to check for empty manifest group here:
1947 # no need to check for empty manifest group here:
1940 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1948 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1941 # no new manifest will be created and the manifest group will
1949 # no new manifest will be created and the manifest group will
1942 # be empty during the pull
1950 # be empty during the pull
1943 self.manifest.addgroup(chunkiter, revmap, trp)
1951 self.manifest.addgroup(chunkiter, revmap, trp)
1944
1952
1945 # process the files
1953 # process the files
1946 self.ui.status(_("adding file changes\n"))
1954 self.ui.status(_("adding file changes\n"))
1947 while 1:
1955 while 1:
1948 f = changegroup.getchunk(source)
1956 f = changegroup.getchunk(source)
1949 if not f:
1957 if not f:
1950 break
1958 break
1951 self.ui.debug(_("adding %s revisions\n") % f)
1959 self.ui.debug(_("adding %s revisions\n") % f)
1952 fl = self.file(f)
1960 fl = self.file(f)
1953 o = len(fl)
1961 o = len(fl)
1954 chunkiter = changegroup.chunkiter(source)
1962 chunkiter = changegroup.chunkiter(source)
1955 if fl.addgroup(chunkiter, revmap, trp) is None:
1963 if fl.addgroup(chunkiter, revmap, trp) is None:
1956 raise util.Abort(_("received file revlog group is empty"))
1964 raise util.Abort(_("received file revlog group is empty"))
1957 revisions += len(fl) - o
1965 revisions += len(fl) - o
1958 files += 1
1966 files += 1
1959
1967
1960 # make changelog see real files again
1968 # make changelog see real files again
1961 cl.finalize(trp)
1969 cl.finalize(trp)
1962
1970
1963 newheads = len(self.changelog.heads())
1971 newheads = len(self.changelog.heads())
1964 heads = ""
1972 heads = ""
1965 if oldheads and newheads != oldheads:
1973 if oldheads and newheads != oldheads:
1966 heads = _(" (%+d heads)") % (newheads - oldheads)
1974 heads = _(" (%+d heads)") % (newheads - oldheads)
1967
1975
1968 self.ui.status(_("added %d changesets"
1976 self.ui.status(_("added %d changesets"
1969 " with %d changes to %d files%s\n")
1977 " with %d changes to %d files%s\n")
1970 % (changesets, revisions, files, heads))
1978 % (changesets, revisions, files, heads))
1971
1979
1972 if changesets > 0:
1980 if changesets > 0:
1973 self.hook('pretxnchangegroup', throw=True,
1981 self.hook('pretxnchangegroup', throw=True,
1974 node=hex(self.changelog.node(cor+1)), source=srctype,
1982 node=hex(self.changelog.node(cor+1)), source=srctype,
1975 url=url)
1983 url=url)
1976
1984
1977 tr.close()
1985 tr.close()
1978 finally:
1986 finally:
1979 del tr
1987 del tr
1980
1988
1981 if changesets > 0:
1989 if changesets > 0:
1982 # forcefully update the on-disk branch cache
1990 # forcefully update the on-disk branch cache
1983 self.ui.debug(_("updating the branch cache\n"))
1991 self.ui.debug(_("updating the branch cache\n"))
1984 self.branchtags()
1992 self.branchtags()
1985 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1993 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1986 source=srctype, url=url)
1994 source=srctype, url=url)
1987
1995
1988 for i in xrange(cor + 1, cnr + 1):
1996 for i in xrange(cor + 1, cnr + 1):
1989 self.hook("incoming", node=hex(self.changelog.node(i)),
1997 self.hook("incoming", node=hex(self.changelog.node(i)),
1990 source=srctype, url=url)
1998 source=srctype, url=url)
1991
1999
1992 # never return 0 here:
2000 # never return 0 here:
1993 if newheads < oldheads:
2001 if newheads < oldheads:
1994 return newheads - oldheads - 1
2002 return newheads - oldheads - 1
1995 else:
2003 else:
1996 return newheads - oldheads + 1
2004 return newheads - oldheads + 1
1997
2005
1998
2006
1999 def stream_in(self, remote):
2007 def stream_in(self, remote):
2000 fp = remote.stream_out()
2008 fp = remote.stream_out()
2001 l = fp.readline()
2009 l = fp.readline()
2002 try:
2010 try:
2003 resp = int(l)
2011 resp = int(l)
2004 except ValueError:
2012 except ValueError:
2005 raise util.UnexpectedOutput(
2013 raise util.UnexpectedOutput(
2006 _('Unexpected response from remote server:'), l)
2014 _('Unexpected response from remote server:'), l)
2007 if resp == 1:
2015 if resp == 1:
2008 raise util.Abort(_('operation forbidden by server'))
2016 raise util.Abort(_('operation forbidden by server'))
2009 elif resp == 2:
2017 elif resp == 2:
2010 raise util.Abort(_('locking the remote repository failed'))
2018 raise util.Abort(_('locking the remote repository failed'))
2011 elif resp != 0:
2019 elif resp != 0:
2012 raise util.Abort(_('the server sent an unknown error code'))
2020 raise util.Abort(_('the server sent an unknown error code'))
2013 self.ui.status(_('streaming all changes\n'))
2021 self.ui.status(_('streaming all changes\n'))
2014 l = fp.readline()
2022 l = fp.readline()
2015 try:
2023 try:
2016 total_files, total_bytes = map(int, l.split(' ', 1))
2024 total_files, total_bytes = map(int, l.split(' ', 1))
2017 except (ValueError, TypeError):
2025 except (ValueError, TypeError):
2018 raise util.UnexpectedOutput(
2026 raise util.UnexpectedOutput(
2019 _('Unexpected response from remote server:'), l)
2027 _('Unexpected response from remote server:'), l)
2020 self.ui.status(_('%d files to transfer, %s of data\n') %
2028 self.ui.status(_('%d files to transfer, %s of data\n') %
2021 (total_files, util.bytecount(total_bytes)))
2029 (total_files, util.bytecount(total_bytes)))
2022 start = time.time()
2030 start = time.time()
2023 for i in xrange(total_files):
2031 for i in xrange(total_files):
2024 # XXX doesn't support '\n' or '\r' in filenames
2032 # XXX doesn't support '\n' or '\r' in filenames
2025 l = fp.readline()
2033 l = fp.readline()
2026 try:
2034 try:
2027 name, size = l.split('\0', 1)
2035 name, size = l.split('\0', 1)
2028 size = int(size)
2036 size = int(size)
2029 except (ValueError, TypeError):
2037 except (ValueError, TypeError):
2030 raise util.UnexpectedOutput(
2038 raise util.UnexpectedOutput(
2031 _('Unexpected response from remote server:'), l)
2039 _('Unexpected response from remote server:'), l)
2032 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2040 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2033 ofp = self.sopener(name, 'w')
2041 ofp = self.sopener(name, 'w')
2034 for chunk in util.filechunkiter(fp, limit=size):
2042 for chunk in util.filechunkiter(fp, limit=size):
2035 ofp.write(chunk)
2043 ofp.write(chunk)
2036 ofp.close()
2044 ofp.close()
2037 elapsed = time.time() - start
2045 elapsed = time.time() - start
2038 if elapsed <= 0:
2046 if elapsed <= 0:
2039 elapsed = 0.001
2047 elapsed = 0.001
2040 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2048 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2041 (util.bytecount(total_bytes), elapsed,
2049 (util.bytecount(total_bytes), elapsed,
2042 util.bytecount(total_bytes / elapsed)))
2050 util.bytecount(total_bytes / elapsed)))
2043 self.invalidate()
2051 self.invalidate()
2044 return len(self.heads()) + 1
2052 return len(self.heads()) + 1
2045
2053
2046 def clone(self, remote, heads=[], stream=False):
2054 def clone(self, remote, heads=[], stream=False):
2047 '''clone remote repository.
2055 '''clone remote repository.
2048
2056
2049 keyword arguments:
2057 keyword arguments:
2050 heads: list of revs to clone (forces use of pull)
2058 heads: list of revs to clone (forces use of pull)
2051 stream: use streaming clone if possible'''
2059 stream: use streaming clone if possible'''
2052
2060
2053 # now, all clients that can request uncompressed clones can
2061 # now, all clients that can request uncompressed clones can
2054 # read repo formats supported by all servers that can serve
2062 # read repo formats supported by all servers that can serve
2055 # them.
2063 # them.
2056
2064
2057 # if revlog format changes, client will have to check version
2065 # if revlog format changes, client will have to check version
2058 # and format flags on "stream" capability, and use
2066 # and format flags on "stream" capability, and use
2059 # uncompressed only if compatible.
2067 # uncompressed only if compatible.
2060
2068
2061 if stream and not heads and remote.capable('stream'):
2069 if stream and not heads and remote.capable('stream'):
2062 return self.stream_in(remote)
2070 return self.stream_in(remote)
2063 return self.pull(remote, heads)
2071 return self.pull(remote, heads)
2064
2072
2065 # used to avoid circular references so destructors work
2073 # used to avoid circular references so destructors work
2066 def aftertrans(files):
2074 def aftertrans(files):
2067 renamefiles = [tuple(t) for t in files]
2075 renamefiles = [tuple(t) for t in files]
2068 def a():
2076 def a():
2069 for src, dest in renamefiles:
2077 for src, dest in renamefiles:
2070 util.rename(src, dest)
2078 util.rename(src, dest)
2071 return a
2079 return a
2072
2080
2073 def instance(ui, path, create):
2081 def instance(ui, path, create):
2074 return localrepository(ui, util.drop_scheme('file', path), create)
2082 return localrepository(ui, util.drop_scheme('file', path), create)
2075
2083
2076 def islocal(path):
2084 def islocal(path):
2077 return True
2085 return True
@@ -1,110 +1,126 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 echo % commit date test
3 echo % commit date test
4 hg init test
4 hg init test
5 cd test
5 cd test
6 echo foo > foo
6 echo foo > foo
7 hg add foo
7 hg add foo
8 HGEDITOR=true hg commit -m ""
8 HGEDITOR=true hg commit -m ""
9 hg commit -d '0 0' -m commit-1
9 hg commit -d '0 0' -m commit-1
10 echo foo >> foo
10 echo foo >> foo
11 hg commit -d '1 4444444' -m commit-3
11 hg commit -d '1 4444444' -m commit-3
12 hg commit -d '1 15.1' -m commit-4
12 hg commit -d '1 15.1' -m commit-4
13 hg commit -d 'foo bar' -m commit-5
13 hg commit -d 'foo bar' -m commit-5
14 hg commit -d ' 1 4444' -m commit-6
14 hg commit -d ' 1 4444' -m commit-6
15 hg commit -d '111111111111 0' -m commit-7
15 hg commit -d '111111111111 0' -m commit-7
16
16
17 echo % commit added file that has been deleted
17 echo % commit added file that has been deleted
18 echo bar > bar
18 echo bar > bar
19 hg add bar
19 hg add bar
20 rm bar
20 rm bar
21 hg commit -d "1000000 0" -m commit-8
21 hg commit -d "1000000 0" -m commit-8
22 hg commit -d "1000000 0" -m commit-8-2 bar
22 hg commit -d "1000000 0" -m commit-8-2 bar
23
23
24 hg -q revert -a --no-backup
24 hg -q revert -a --no-backup
25
25
26 mkdir dir
26 mkdir dir
27 echo boo > dir/file
27 echo boo > dir/file
28 hg add
28 hg add
29 hg -v commit -d '0 0' -m commit-9 dir
29 hg -v commit -d '0 0' -m commit-9 dir
30
30
31 echo > dir.file
31 echo > dir.file
32 hg add
32 hg add
33 hg commit -d '0 0' -m commit-10 dir dir.file
33 hg commit -d '0 0' -m commit-10 dir dir.file
34
34
35 echo >> dir/file
35 echo >> dir/file
36 mkdir bleh
36 mkdir bleh
37 mkdir dir2
37 mkdir dir2
38 cd bleh
38 cd bleh
39 hg commit -d '0 0' -m commit-11 .
39 hg commit -d '0 0' -m commit-11 .
40 hg commit -d '0 0' -m commit-12 ../dir ../dir2
40 hg commit -d '0 0' -m commit-12 ../dir ../dir2
41 hg -v commit -d '0 0' -m commit-13 ../dir
41 hg -v commit -d '0 0' -m commit-13 ../dir
42 cd ..
42 cd ..
43
43
44 hg commit -d '0 0' -m commit-14 does-not-exist
44 hg commit -d '0 0' -m commit-14 does-not-exist
45 ln -s foo baz
45 ln -s foo baz
46 hg commit -d '0 0' -m commit-15 baz
46 hg commit -d '0 0' -m commit-15 baz
47 touch quux
47 touch quux
48 hg commit -d '0 0' -m commit-16 quux
48 hg commit -d '0 0' -m commit-16 quux
49 echo >> dir/file
49 echo >> dir/file
50 hg -v commit -d '0 0' -m commit-17 dir/file
50 hg -v commit -d '0 0' -m commit-17 dir/file
51 # An empty date was interpreted as epoch origin
51 # An empty date was interpreted as epoch origin
52 echo foo >> foo
52 echo foo >> foo
53 hg commit -d '' -m commit-no-date
53 hg commit -d '' -m commit-no-date
54 hg tip --template '{date|isodate}\n' | grep '1970'
54 hg tip --template '{date|isodate}\n' | grep '1970'
55 cd ..
55 cd ..
56
56
57 echo % partial subdir commit test
57 echo % partial subdir commit test
58 hg init test2
58 hg init test2
59 cd test2
59 cd test2
60 mkdir foo
60 mkdir foo
61 echo foo > foo/foo
61 echo foo > foo/foo
62 mkdir bar
62 mkdir bar
63 echo bar > bar/bar
63 echo bar > bar/bar
64 hg add
64 hg add
65 hg ci -d '1000000 0' -u test -m commit-subdir-1 foo
65 hg ci -d '1000000 0' -u test -m commit-subdir-1 foo
66 hg ci -d '1000001 0' -u test -m commit-subdir-2 bar
66 hg ci -d '1000001 0' -u test -m commit-subdir-2 bar
67 echo % subdir log 1
67 echo % subdir log 1
68 hg log -v foo
68 hg log -v foo
69 echo % subdir log 2
69 echo % subdir log 2
70 hg log -v bar
70 hg log -v bar
71 echo % full log
71 echo % full log
72 hg log -v
72 hg log -v
73 cd ..
73 cd ..
74
74
75 echo % dot and subdir commit test
75 echo % dot and subdir commit test
76 hg init test3
76 hg init test3
77 cd test3
77 cd test3
78 mkdir foo
78 mkdir foo
79 echo foo content > foo/plain-file
79 echo foo content > foo/plain-file
80 hg add foo/plain-file
80 hg add foo/plain-file
81 hg ci -d '1000000 0' -u test -m commit-foo-subdir foo
81 hg ci -d '1000000 0' -u test -m commit-foo-subdir foo
82 echo modified foo content > foo/plain-file
82 echo modified foo content > foo/plain-file
83 hg ci -d '2000000 0' -u test -m commit-foo-dot .
83 hg ci -d '2000000 0' -u test -m commit-foo-dot .
84 echo % full log
84 echo % full log
85 hg log -v
85 hg log -v
86 echo % subdir log
86 echo % subdir log
87 cd foo
87 cd foo
88 hg log .
88 hg log .
89 cd ..
89 cd ..
90 cd ..
90 cd ..
91
91
92 cd ..
92 cd ..
93 hg init issue1049
93 hg init issue1049
94 cd issue1049
94 cd issue1049
95 echo a > a
95 echo a > a
96 hg ci -Ama
96 hg ci -Ama
97 echo a >> a
97 echo a >> a
98 hg ci -mb
98 hg ci -mb
99 hg up 0
99 hg up 0
100 echo b >> a
100 echo b >> a
101 hg ci -mc
101 hg ci -mc
102 HGMERGE=true hg merge
102 HGMERGE=true hg merge
103 echo % should fail because we are specifying a file name
103 echo % should fail because we are specifying a file name
104 hg ci -mmerge a
104 hg ci -mmerge a
105 echo % should fail because we are specifying a pattern
105 echo % should fail because we are specifying a pattern
106 hg ci -mmerge -I a
106 hg ci -mmerge -I a
107 echo % should succeed
107 echo % should succeed
108 hg ci -mmerge
108 hg ci -mmerge
109 cd ..
110
111
112 echo % test commit message content
113 hg init commitmsg
114 cd commitmsg
115 echo changed > changed
116 echo removed > removed
117 hg ci -qAm init
118
119 hg rm removed
120 echo changed >> changed
121 echo added > added
122 hg add added
123 HGEDITOR=cat hg ci -A
124 cd ..
109
125
110 exit 0
126 exit 0
@@ -1,108 +1,121 b''
1 % commit date test
1 % commit date test
2 transaction abort!
2 transaction abort!
3 rollback completed
3 rollback completed
4 abort: empty commit message
4 abort: empty commit message
5 abort: impossible time zone offset: 4444444
5 abort: impossible time zone offset: 4444444
6 abort: invalid date: '1\t15.1'
6 abort: invalid date: '1\t15.1'
7 abort: invalid date: 'foo bar'
7 abort: invalid date: 'foo bar'
8 abort: date exceeds 32 bits: 111111111111
8 abort: date exceeds 32 bits: 111111111111
9 % commit added file that has been deleted
9 % commit added file that has been deleted
10 nothing changed
10 nothing changed
11 abort: file bar not found!
11 abort: file bar not found!
12 adding dir/file
12 adding dir/file
13 dir/file
13 dir/file
14 committed changeset 2:d2a76177cb42
14 committed changeset 2:d2a76177cb42
15 adding dir.file
15 adding dir.file
16 abort: no match under directory dir!
16 abort: no match under directory dir!
17 abort: no match under directory .!
17 abort: no match under directory .!
18 abort: no match under directory ../dir2!
18 abort: no match under directory ../dir2!
19 dir/file
19 dir/file
20 committed changeset 3:1cd62a2d8db5
20 committed changeset 3:1cd62a2d8db5
21 does-not-exist: No such file or directory
21 does-not-exist: No such file or directory
22 abort: file does-not-exist not found!
22 abort: file does-not-exist not found!
23 abort: file baz not tracked!
23 abort: file baz not tracked!
24 abort: file quux not tracked!
24 abort: file quux not tracked!
25 dir/file
25 dir/file
26 committed changeset 4:49176991390e
26 committed changeset 4:49176991390e
27 % partial subdir commit test
27 % partial subdir commit test
28 adding bar/bar
28 adding bar/bar
29 adding foo/foo
29 adding foo/foo
30 % subdir log 1
30 % subdir log 1
31 changeset: 0:6ef3cb06bb80
31 changeset: 0:6ef3cb06bb80
32 user: test
32 user: test
33 date: Mon Jan 12 13:46:40 1970 +0000
33 date: Mon Jan 12 13:46:40 1970 +0000
34 files: foo/foo
34 files: foo/foo
35 description:
35 description:
36 commit-subdir-1
36 commit-subdir-1
37
37
38
38
39 % subdir log 2
39 % subdir log 2
40 changeset: 1:f2e51572cf5a
40 changeset: 1:f2e51572cf5a
41 tag: tip
41 tag: tip
42 user: test
42 user: test
43 date: Mon Jan 12 13:46:41 1970 +0000
43 date: Mon Jan 12 13:46:41 1970 +0000
44 files: bar/bar
44 files: bar/bar
45 description:
45 description:
46 commit-subdir-2
46 commit-subdir-2
47
47
48
48
49 % full log
49 % full log
50 changeset: 1:f2e51572cf5a
50 changeset: 1:f2e51572cf5a
51 tag: tip
51 tag: tip
52 user: test
52 user: test
53 date: Mon Jan 12 13:46:41 1970 +0000
53 date: Mon Jan 12 13:46:41 1970 +0000
54 files: bar/bar
54 files: bar/bar
55 description:
55 description:
56 commit-subdir-2
56 commit-subdir-2
57
57
58
58
59 changeset: 0:6ef3cb06bb80
59 changeset: 0:6ef3cb06bb80
60 user: test
60 user: test
61 date: Mon Jan 12 13:46:40 1970 +0000
61 date: Mon Jan 12 13:46:40 1970 +0000
62 files: foo/foo
62 files: foo/foo
63 description:
63 description:
64 commit-subdir-1
64 commit-subdir-1
65
65
66
66
67 % dot and subdir commit test
67 % dot and subdir commit test
68 % full log
68 % full log
69 changeset: 1:d9180e04fa8a
69 changeset: 1:d9180e04fa8a
70 tag: tip
70 tag: tip
71 user: test
71 user: test
72 date: Sat Jan 24 03:33:20 1970 +0000
72 date: Sat Jan 24 03:33:20 1970 +0000
73 files: foo/plain-file
73 files: foo/plain-file
74 description:
74 description:
75 commit-foo-dot
75 commit-foo-dot
76
76
77
77
78 changeset: 0:80b572aaf098
78 changeset: 0:80b572aaf098
79 user: test
79 user: test
80 date: Mon Jan 12 13:46:40 1970 +0000
80 date: Mon Jan 12 13:46:40 1970 +0000
81 files: foo/plain-file
81 files: foo/plain-file
82 description:
82 description:
83 commit-foo-subdir
83 commit-foo-subdir
84
84
85
85
86 % subdir log
86 % subdir log
87 changeset: 1:d9180e04fa8a
87 changeset: 1:d9180e04fa8a
88 tag: tip
88 tag: tip
89 user: test
89 user: test
90 date: Sat Jan 24 03:33:20 1970 +0000
90 date: Sat Jan 24 03:33:20 1970 +0000
91 summary: commit-foo-dot
91 summary: commit-foo-dot
92
92
93 changeset: 0:80b572aaf098
93 changeset: 0:80b572aaf098
94 user: test
94 user: test
95 date: Mon Jan 12 13:46:40 1970 +0000
95 date: Mon Jan 12 13:46:40 1970 +0000
96 summary: commit-foo-subdir
96 summary: commit-foo-subdir
97
97
98 adding a
98 adding a
99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 created new head
100 created new head
101 merging a
101 merging a
102 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
102 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
103 (branch merge, don't forget to commit)
103 (branch merge, don't forget to commit)
104 % should fail because we are specifying a file name
104 % should fail because we are specifying a file name
105 abort: cannot partially commit a merge (do not specify files or patterns)
105 abort: cannot partially commit a merge (do not specify files or patterns)
106 % should fail because we are specifying a pattern
106 % should fail because we are specifying a pattern
107 abort: cannot partially commit a merge (do not specify files or patterns)
107 abort: cannot partially commit a merge (do not specify files or patterns)
108 % should succeed
108 % should succeed
109 % test commit message content
110
111
112 HG: Enter commit message. Lines beginning with 'HG:' are removed.
113 HG: --
114 HG: user: test
115 HG: branch 'default'
116 HG: added added
117 HG: changed changed
118 HG: removed removed
119 transaction abort!
120 rollback completed
121 abort: empty commit message
General Comments 0
You need to be logged in to leave comments. Login now