##// END OF EJS Templates
Fix some bugs introduced during the manifest refactoring
Alexis S. L. Carvalho -
r2857:18cf5349 default
parent child Browse files
Show More
@@ -1,173 +1,174
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of
5 # This software may be used and distributed according to the terms of
6 # the GNU General Public License, incorporated herein by reference.
6 # the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import *
8 from demandload import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from node import *
10 from node import *
11 demandload(globals(), 'cStringIO os stat tarfile time util zipfile')
11 demandload(globals(), 'cStringIO os stat tarfile time util zipfile')
12
12
13 def tidyprefix(dest, prefix, suffixes):
13 def tidyprefix(dest, prefix, suffixes):
14 '''choose prefix to use for names in archive. make sure prefix is
14 '''choose prefix to use for names in archive. make sure prefix is
15 safe for consumers.'''
15 safe for consumers.'''
16
16
17 if prefix:
17 if prefix:
18 prefix = prefix.replace('\\', '/')
18 prefix = prefix.replace('\\', '/')
19 else:
19 else:
20 if not isinstance(dest, str):
20 if not isinstance(dest, str):
21 raise ValueError('dest must be string if no prefix')
21 raise ValueError('dest must be string if no prefix')
22 prefix = os.path.basename(dest)
22 prefix = os.path.basename(dest)
23 lower = prefix.lower()
23 lower = prefix.lower()
24 for sfx in suffixes:
24 for sfx in suffixes:
25 if lower.endswith(sfx):
25 if lower.endswith(sfx):
26 prefix = prefix[:-len(sfx)]
26 prefix = prefix[:-len(sfx)]
27 break
27 break
28 lpfx = os.path.normpath(util.localpath(prefix))
28 lpfx = os.path.normpath(util.localpath(prefix))
29 prefix = util.pconvert(lpfx)
29 prefix = util.pconvert(lpfx)
30 if not prefix.endswith('/'):
30 if not prefix.endswith('/'):
31 prefix += '/'
31 prefix += '/'
32 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
32 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
33 raise util.Abort(_('archive prefix contains illegal components'))
33 raise util.Abort(_('archive prefix contains illegal components'))
34 return prefix
34 return prefix
35
35
36 class tarit:
36 class tarit:
37 '''write archive to tar file or stream. can write uncompressed,
37 '''write archive to tar file or stream. can write uncompressed,
38 or compress with gzip or bzip2.'''
38 or compress with gzip or bzip2.'''
39
39
40 def __init__(self, dest, prefix, mtime, kind=''):
40 def __init__(self, dest, prefix, mtime, kind=''):
41 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
41 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
42 '.tgz', 'tbz2'])
42 '.tgz', 'tbz2'])
43 self.mtime = mtime
43 self.mtime = mtime
44 if isinstance(dest, str):
44 if isinstance(dest, str):
45 self.z = tarfile.open(dest, mode='w:'+kind)
45 self.z = tarfile.open(dest, mode='w:'+kind)
46 else:
46 else:
47 self.z = tarfile.open(mode='w|'+kind, fileobj=dest)
47 self.z = tarfile.open(mode='w|'+kind, fileobj=dest)
48
48
49 def addfile(self, name, mode, data):
49 def addfile(self, name, mode, data):
50 i = tarfile.TarInfo(self.prefix + name)
50 i = tarfile.TarInfo(self.prefix + name)
51 i.mtime = self.mtime
51 i.mtime = self.mtime
52 i.size = len(data)
52 i.size = len(data)
53 i.mode = mode
53 i.mode = mode
54 self.z.addfile(i, cStringIO.StringIO(data))
54 self.z.addfile(i, cStringIO.StringIO(data))
55
55
56 def done(self):
56 def done(self):
57 self.z.close()
57 self.z.close()
58
58
59 class tellable:
59 class tellable:
60 '''provide tell method for zipfile.ZipFile when writing to http
60 '''provide tell method for zipfile.ZipFile when writing to http
61 response file object.'''
61 response file object.'''
62
62
63 def __init__(self, fp):
63 def __init__(self, fp):
64 self.fp = fp
64 self.fp = fp
65 self.offset = 0
65 self.offset = 0
66
66
67 def __getattr__(self, key):
67 def __getattr__(self, key):
68 return getattr(self.fp, key)
68 return getattr(self.fp, key)
69
69
70 def write(self, s):
70 def write(self, s):
71 self.fp.write(s)
71 self.fp.write(s)
72 self.offset += len(s)
72 self.offset += len(s)
73
73
74 def tell(self):
74 def tell(self):
75 return self.offset
75 return self.offset
76
76
77 class zipit:
77 class zipit:
78 '''write archive to zip file or stream. can write uncompressed,
78 '''write archive to zip file or stream. can write uncompressed,
79 or compressed with deflate.'''
79 or compressed with deflate.'''
80
80
81 def __init__(self, dest, prefix, mtime, compress=True):
81 def __init__(self, dest, prefix, mtime, compress=True):
82 self.prefix = tidyprefix(dest, prefix, ('.zip',))
82 self.prefix = tidyprefix(dest, prefix, ('.zip',))
83 if not isinstance(dest, str):
83 if not isinstance(dest, str):
84 try:
84 try:
85 dest.tell()
85 dest.tell()
86 except (AttributeError, IOError):
86 except (AttributeError, IOError):
87 dest = tellable(dest)
87 dest = tellable(dest)
88 self.z = zipfile.ZipFile(dest, 'w',
88 self.z = zipfile.ZipFile(dest, 'w',
89 compress and zipfile.ZIP_DEFLATED or
89 compress and zipfile.ZIP_DEFLATED or
90 zipfile.ZIP_STORED)
90 zipfile.ZIP_STORED)
91 self.date_time = time.gmtime(mtime)[:6]
91 self.date_time = time.gmtime(mtime)[:6]
92
92
93 def addfile(self, name, mode, data):
93 def addfile(self, name, mode, data):
94 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
94 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
95 i.compress_type = self.z.compression
95 i.compress_type = self.z.compression
96 i.flag_bits = 0x08
96 i.flag_bits = 0x08
97 # unzip will not honor unix file modes unless file creator is
97 # unzip will not honor unix file modes unless file creator is
98 # set to unix (id 3).
98 # set to unix (id 3).
99 i.create_system = 3
99 i.create_system = 3
100 i.external_attr = (mode | stat.S_IFREG) << 16L
100 i.external_attr = (mode | stat.S_IFREG) << 16L
101 self.z.writestr(i, data)
101 self.z.writestr(i, data)
102
102
103 def done(self):
103 def done(self):
104 self.z.close()
104 self.z.close()
105
105
106 class fileit:
106 class fileit:
107 '''write archive as files in directory.'''
107 '''write archive as files in directory.'''
108
108
109 def __init__(self, name, prefix, mtime):
109 def __init__(self, name, prefix, mtime):
110 if prefix:
110 if prefix:
111 raise util.Abort(_('cannot give prefix when archiving to files'))
111 raise util.Abort(_('cannot give prefix when archiving to files'))
112 self.basedir = name
112 self.basedir = name
113 self.dirs = {}
113 self.dirs = {}
114 self.oflags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY |
114 self.oflags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY |
115 getattr(os, 'O_BINARY', 0) |
115 getattr(os, 'O_BINARY', 0) |
116 getattr(os, 'O_NOFOLLOW', 0))
116 getattr(os, 'O_NOFOLLOW', 0))
117
117
118 def addfile(self, name, mode, data):
118 def addfile(self, name, mode, data):
119 destfile = os.path.join(self.basedir, name)
119 destfile = os.path.join(self.basedir, name)
120 destdir = os.path.dirname(destfile)
120 destdir = os.path.dirname(destfile)
121 if destdir not in self.dirs:
121 if destdir not in self.dirs:
122 if not os.path.isdir(destdir):
122 if not os.path.isdir(destdir):
123 os.makedirs(destdir)
123 os.makedirs(destdir)
124 self.dirs[destdir] = 1
124 self.dirs[destdir] = 1
125 os.fdopen(os.open(destfile, self.oflags, mode), 'wb').write(data)
125 os.fdopen(os.open(destfile, self.oflags, mode), 'wb').write(data)
126
126
127 def done(self):
127 def done(self):
128 pass
128 pass
129
129
130 archivers = {
130 archivers = {
131 'files': fileit,
131 'files': fileit,
132 'tar': tarit,
132 'tar': tarit,
133 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
133 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
134 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
134 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
135 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
135 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
136 'zip': zipit,
136 'zip': zipit,
137 }
137 }
138
138
139 def archive(repo, dest, node, kind, decode=True, matchfn=None,
139 def archive(repo, dest, node, kind, decode=True, matchfn=None,
140 prefix=None, mtime=None):
140 prefix=None, mtime=None):
141 '''create archive of repo as it was at node.
141 '''create archive of repo as it was at node.
142
142
143 dest can be name of directory, name of archive file, or file
143 dest can be name of directory, name of archive file, or file
144 object to write archive to.
144 object to write archive to.
145
145
146 kind is type of archive to create.
146 kind is type of archive to create.
147
147
148 decode tells whether to put files through decode filters from
148 decode tells whether to put files through decode filters from
149 hgrc.
149 hgrc.
150
150
151 matchfn is function to filter names of files to write to archive.
151 matchfn is function to filter names of files to write to archive.
152
152
153 prefix is name of path to put before every archive member.'''
153 prefix is name of path to put before every archive member.'''
154
154
155 def write(name, mode, data):
155 def write(name, mode, data):
156 if matchfn and not matchfn(name): return
156 if matchfn and not matchfn(name): return
157 if decode:
157 if decode:
158 fp = cStringIO.StringIO()
158 fp = cStringIO.StringIO()
159 repo.wwrite(name, data, fp)
159 repo.wwrite(name, data, fp)
160 data = fp.getvalue()
160 data = fp.getvalue()
161 archiver.addfile(name, mode, data)
161 archiver.addfile(name, mode, data)
162
162
163 change = repo.changelog.read(node)
163 change = repo.changelog.read(node)
164 mn = change[0]
164 mn = change[0]
165 archiver = archivers[kind](dest, prefix, mtime or change[2][0])
165 archiver = archivers[kind](dest, prefix, mtime or change[2][0])
166 mf = repo.manifest.read(mn).items()
166 m = repo.manifest.read(mn)
167 mf.sort()
167 items = m.items()
168 items.sort()
168 write('.hg_archival.txt', 0644,
169 write('.hg_archival.txt', 0644,
169 'repo: %s\nnode: %s\n' % (hex(repo.changelog.node(0)), hex(node)))
170 'repo: %s\nnode: %s\n' % (hex(repo.changelog.node(0)), hex(node)))
170 for filename, filenode in mf:
171 for filename, filenode in items:
171 write(filename, mf.execf(filename) and 0755 or 0644,
172 write(filename, m.execf(filename) and 0755 or 0644,
172 repo.file(filename).read(filenode))
173 repo.file(filename).read(filenode))
173 archiver.done()
174 archiver.done()
@@ -1,991 +1,991
1 # hgweb/hgweb_mod.py - Web interface for a repository.
1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 import os.path
10 import os.path
11 import mimetypes
11 import mimetypes
12 from mercurial.demandload import demandload
12 from mercurial.demandload import demandload
13 demandload(globals(), "re zlib ConfigParser mimetools cStringIO sys tempfile")
13 demandload(globals(), "re zlib ConfigParser mimetools cStringIO sys tempfile")
14 demandload(globals(), "mercurial:mdiff,ui,hg,util,archival,streamclone")
14 demandload(globals(), "mercurial:mdiff,ui,hg,util,archival,streamclone")
15 demandload(globals(), "mercurial:templater")
15 demandload(globals(), "mercurial:templater")
16 demandload(globals(), "mercurial.hgweb.common:get_mtime,staticfile")
16 demandload(globals(), "mercurial.hgweb.common:get_mtime,staticfile")
17 from mercurial.node import *
17 from mercurial.node import *
18 from mercurial.i18n import gettext as _
18 from mercurial.i18n import gettext as _
19
19
20 def _up(p):
20 def _up(p):
21 if p[0] != "/":
21 if p[0] != "/":
22 p = "/" + p
22 p = "/" + p
23 if p[-1] == "/":
23 if p[-1] == "/":
24 p = p[:-1]
24 p = p[:-1]
25 up = os.path.dirname(p)
25 up = os.path.dirname(p)
26 if up == "/":
26 if up == "/":
27 return "/"
27 return "/"
28 return up + "/"
28 return up + "/"
29
29
30 class hgweb(object):
30 class hgweb(object):
31 def __init__(self, repo, name=None):
31 def __init__(self, repo, name=None):
32 if type(repo) == type(""):
32 if type(repo) == type(""):
33 self.repo = hg.repository(ui.ui(), repo)
33 self.repo = hg.repository(ui.ui(), repo)
34 else:
34 else:
35 self.repo = repo
35 self.repo = repo
36
36
37 self.mtime = -1
37 self.mtime = -1
38 self.reponame = name
38 self.reponame = name
39 self.archives = 'zip', 'gz', 'bz2'
39 self.archives = 'zip', 'gz', 'bz2'
40 self.stripecount = 1
40 self.stripecount = 1
41 self.templatepath = self.repo.ui.config("web", "templates",
41 self.templatepath = self.repo.ui.config("web", "templates",
42 templater.templatepath())
42 templater.templatepath())
43
43
44 def refresh(self):
44 def refresh(self):
45 mtime = get_mtime(self.repo.root)
45 mtime = get_mtime(self.repo.root)
46 if mtime != self.mtime:
46 if mtime != self.mtime:
47 self.mtime = mtime
47 self.mtime = mtime
48 self.repo = hg.repository(self.repo.ui, self.repo.root)
48 self.repo = hg.repository(self.repo.ui, self.repo.root)
49 self.maxchanges = int(self.repo.ui.config("web", "maxchanges", 10))
49 self.maxchanges = int(self.repo.ui.config("web", "maxchanges", 10))
50 self.stripecount = int(self.repo.ui.config("web", "stripes", 1))
50 self.stripecount = int(self.repo.ui.config("web", "stripes", 1))
51 self.maxshortchanges = int(self.repo.ui.config("web", "maxshortchanges", 60))
51 self.maxshortchanges = int(self.repo.ui.config("web", "maxshortchanges", 60))
52 self.maxfiles = int(self.repo.ui.config("web", "maxfiles", 10))
52 self.maxfiles = int(self.repo.ui.config("web", "maxfiles", 10))
53 self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
53 self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
54
54
55 def archivelist(self, nodeid):
55 def archivelist(self, nodeid):
56 allowed = self.repo.ui.configlist("web", "allow_archive")
56 allowed = self.repo.ui.configlist("web", "allow_archive")
57 for i in self.archives:
57 for i in self.archives:
58 if i in allowed or self.repo.ui.configbool("web", "allow" + i):
58 if i in allowed or self.repo.ui.configbool("web", "allow" + i):
59 yield {"type" : i, "node" : nodeid, "url": ""}
59 yield {"type" : i, "node" : nodeid, "url": ""}
60
60
61 def listfiles(self, files, mf):
61 def listfiles(self, files, mf):
62 for f in files[:self.maxfiles]:
62 for f in files[:self.maxfiles]:
63 yield self.t("filenodelink", node=hex(mf[f]), file=f)
63 yield self.t("filenodelink", node=hex(mf[f]), file=f)
64 if len(files) > self.maxfiles:
64 if len(files) > self.maxfiles:
65 yield self.t("fileellipses")
65 yield self.t("fileellipses")
66
66
67 def listfilediffs(self, files, changeset):
67 def listfilediffs(self, files, changeset):
68 for f in files[:self.maxfiles]:
68 for f in files[:self.maxfiles]:
69 yield self.t("filedifflink", node=hex(changeset), file=f)
69 yield self.t("filedifflink", node=hex(changeset), file=f)
70 if len(files) > self.maxfiles:
70 if len(files) > self.maxfiles:
71 yield self.t("fileellipses")
71 yield self.t("fileellipses")
72
72
73 def siblings(self, siblings=[], rev=None, hiderev=None, **args):
73 def siblings(self, siblings=[], rev=None, hiderev=None, **args):
74 if not rev:
74 if not rev:
75 rev = lambda x: ""
75 rev = lambda x: ""
76 siblings = [s for s in siblings if s != nullid]
76 siblings = [s for s in siblings if s != nullid]
77 if len(siblings) == 1 and rev(siblings[0]) == hiderev:
77 if len(siblings) == 1 and rev(siblings[0]) == hiderev:
78 return
78 return
79 for s in siblings:
79 for s in siblings:
80 yield dict(node=hex(s), rev=rev(s), **args)
80 yield dict(node=hex(s), rev=rev(s), **args)
81
81
82 def renamelink(self, fl, node):
82 def renamelink(self, fl, node):
83 r = fl.renamed(node)
83 r = fl.renamed(node)
84 if r:
84 if r:
85 return [dict(file=r[0], node=hex(r[1]))]
85 return [dict(file=r[0], node=hex(r[1]))]
86 return []
86 return []
87
87
88 def showtag(self, t1, node=nullid, **args):
88 def showtag(self, t1, node=nullid, **args):
89 for t in self.repo.nodetags(node):
89 for t in self.repo.nodetags(node):
90 yield self.t(t1, tag=t, **args)
90 yield self.t(t1, tag=t, **args)
91
91
92 def diff(self, node1, node2, files):
92 def diff(self, node1, node2, files):
93 def filterfiles(filters, files):
93 def filterfiles(filters, files):
94 l = [x for x in files if x in filters]
94 l = [x for x in files if x in filters]
95
95
96 for t in filters:
96 for t in filters:
97 if t and t[-1] != os.sep:
97 if t and t[-1] != os.sep:
98 t += os.sep
98 t += os.sep
99 l += [x for x in files if x.startswith(t)]
99 l += [x for x in files if x.startswith(t)]
100 return l
100 return l
101
101
102 parity = [0]
102 parity = [0]
103 def diffblock(diff, f, fn):
103 def diffblock(diff, f, fn):
104 yield self.t("diffblock",
104 yield self.t("diffblock",
105 lines=prettyprintlines(diff),
105 lines=prettyprintlines(diff),
106 parity=parity[0],
106 parity=parity[0],
107 file=f,
107 file=f,
108 filenode=hex(fn or nullid))
108 filenode=hex(fn or nullid))
109 parity[0] = 1 - parity[0]
109 parity[0] = 1 - parity[0]
110
110
111 def prettyprintlines(diff):
111 def prettyprintlines(diff):
112 for l in diff.splitlines(1):
112 for l in diff.splitlines(1):
113 if l.startswith('+'):
113 if l.startswith('+'):
114 yield self.t("difflineplus", line=l)
114 yield self.t("difflineplus", line=l)
115 elif l.startswith('-'):
115 elif l.startswith('-'):
116 yield self.t("difflineminus", line=l)
116 yield self.t("difflineminus", line=l)
117 elif l.startswith('@'):
117 elif l.startswith('@'):
118 yield self.t("difflineat", line=l)
118 yield self.t("difflineat", line=l)
119 else:
119 else:
120 yield self.t("diffline", line=l)
120 yield self.t("diffline", line=l)
121
121
122 r = self.repo
122 r = self.repo
123 cl = r.changelog
123 cl = r.changelog
124 mf = r.manifest
124 mf = r.manifest
125 change1 = cl.read(node1)
125 change1 = cl.read(node1)
126 change2 = cl.read(node2)
126 change2 = cl.read(node2)
127 mmap1 = mf.read(change1[0])
127 mmap1 = mf.read(change1[0])
128 mmap2 = mf.read(change2[0])
128 mmap2 = mf.read(change2[0])
129 date1 = util.datestr(change1[2])
129 date1 = util.datestr(change1[2])
130 date2 = util.datestr(change2[2])
130 date2 = util.datestr(change2[2])
131
131
132 modified, added, removed, deleted, unknown = r.changes(node1, node2)
132 modified, added, removed, deleted, unknown = r.changes(node1, node2)
133 if files:
133 if files:
134 modified, added, removed = map(lambda x: filterfiles(files, x),
134 modified, added, removed = map(lambda x: filterfiles(files, x),
135 (modified, added, removed))
135 (modified, added, removed))
136
136
137 diffopts = self.repo.ui.diffopts()
137 diffopts = self.repo.ui.diffopts()
138 showfunc = diffopts['showfunc']
138 showfunc = diffopts['showfunc']
139 ignorews = diffopts['ignorews']
139 ignorews = diffopts['ignorews']
140 ignorewsamount = diffopts['ignorewsamount']
140 ignorewsamount = diffopts['ignorewsamount']
141 ignoreblanklines = diffopts['ignoreblanklines']
141 ignoreblanklines = diffopts['ignoreblanklines']
142 for f in modified:
142 for f in modified:
143 to = r.file(f).read(mmap1[f])
143 to = r.file(f).read(mmap1[f])
144 tn = r.file(f).read(mmap2[f])
144 tn = r.file(f).read(mmap2[f])
145 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
145 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
146 showfunc=showfunc, ignorews=ignorews,
146 showfunc=showfunc, ignorews=ignorews,
147 ignorewsamount=ignorewsamount,
147 ignorewsamount=ignorewsamount,
148 ignoreblanklines=ignoreblanklines), f, tn)
148 ignoreblanklines=ignoreblanklines), f, tn)
149 for f in added:
149 for f in added:
150 to = None
150 to = None
151 tn = r.file(f).read(mmap2[f])
151 tn = r.file(f).read(mmap2[f])
152 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
152 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
153 showfunc=showfunc, ignorews=ignorews,
153 showfunc=showfunc, ignorews=ignorews,
154 ignorewsamount=ignorewsamount,
154 ignorewsamount=ignorewsamount,
155 ignoreblanklines=ignoreblanklines), f, tn)
155 ignoreblanklines=ignoreblanklines), f, tn)
156 for f in removed:
156 for f in removed:
157 to = r.file(f).read(mmap1[f])
157 to = r.file(f).read(mmap1[f])
158 tn = None
158 tn = None
159 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
159 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
160 showfunc=showfunc, ignorews=ignorews,
160 showfunc=showfunc, ignorews=ignorews,
161 ignorewsamount=ignorewsamount,
161 ignorewsamount=ignorewsamount,
162 ignoreblanklines=ignoreblanklines), f, tn)
162 ignoreblanklines=ignoreblanklines), f, tn)
163
163
164 def changelog(self, pos, shortlog=False):
164 def changelog(self, pos, shortlog=False):
165 def changenav(**map):
165 def changenav(**map):
166 def seq(factor, maxchanges=None):
166 def seq(factor, maxchanges=None):
167 if maxchanges:
167 if maxchanges:
168 yield maxchanges
168 yield maxchanges
169 if maxchanges >= 20 and maxchanges <= 40:
169 if maxchanges >= 20 and maxchanges <= 40:
170 yield 50
170 yield 50
171 else:
171 else:
172 yield 1 * factor
172 yield 1 * factor
173 yield 3 * factor
173 yield 3 * factor
174 for f in seq(factor * 10):
174 for f in seq(factor * 10):
175 yield f
175 yield f
176
176
177 l = []
177 l = []
178 last = 0
178 last = 0
179 maxchanges = shortlog and self.maxshortchanges or self.maxchanges
179 maxchanges = shortlog and self.maxshortchanges or self.maxchanges
180 for f in seq(1, maxchanges):
180 for f in seq(1, maxchanges):
181 if f < maxchanges or f <= last:
181 if f < maxchanges or f <= last:
182 continue
182 continue
183 if f > count:
183 if f > count:
184 break
184 break
185 last = f
185 last = f
186 r = "%d" % f
186 r = "%d" % f
187 if pos + f < count:
187 if pos + f < count:
188 l.append(("+" + r, pos + f))
188 l.append(("+" + r, pos + f))
189 if pos - f >= 0:
189 if pos - f >= 0:
190 l.insert(0, ("-" + r, pos - f))
190 l.insert(0, ("-" + r, pos - f))
191
191
192 yield {"rev": 0, "label": "(0)"}
192 yield {"rev": 0, "label": "(0)"}
193
193
194 for label, rev in l:
194 for label, rev in l:
195 yield {"label": label, "rev": rev}
195 yield {"label": label, "rev": rev}
196
196
197 yield {"label": "tip", "rev": "tip"}
197 yield {"label": "tip", "rev": "tip"}
198
198
199 def changelist(**map):
199 def changelist(**map):
200 parity = (start - end) & 1
200 parity = (start - end) & 1
201 cl = self.repo.changelog
201 cl = self.repo.changelog
202 l = [] # build a list in forward order for efficiency
202 l = [] # build a list in forward order for efficiency
203 for i in range(start, end):
203 for i in range(start, end):
204 n = cl.node(i)
204 n = cl.node(i)
205 changes = cl.read(n)
205 changes = cl.read(n)
206 hn = hex(n)
206 hn = hex(n)
207
207
208 l.insert(0, {"parity": parity,
208 l.insert(0, {"parity": parity,
209 "author": changes[1],
209 "author": changes[1],
210 "parent": self.siblings(cl.parents(n), cl.rev,
210 "parent": self.siblings(cl.parents(n), cl.rev,
211 cl.rev(n) - 1),
211 cl.rev(n) - 1),
212 "child": self.siblings(cl.children(n), cl.rev,
212 "child": self.siblings(cl.children(n), cl.rev,
213 cl.rev(n) + 1),
213 cl.rev(n) + 1),
214 "changelogtag": self.showtag("changelogtag",n),
214 "changelogtag": self.showtag("changelogtag",n),
215 "manifest": hex(changes[0]),
215 "manifest": hex(changes[0]),
216 "desc": changes[4],
216 "desc": changes[4],
217 "date": changes[2],
217 "date": changes[2],
218 "files": self.listfilediffs(changes[3], n),
218 "files": self.listfilediffs(changes[3], n),
219 "rev": i,
219 "rev": i,
220 "node": hn})
220 "node": hn})
221 parity = 1 - parity
221 parity = 1 - parity
222
222
223 for e in l:
223 for e in l:
224 yield e
224 yield e
225
225
226 maxchanges = shortlog and self.maxshortchanges or self.maxchanges
226 maxchanges = shortlog and self.maxshortchanges or self.maxchanges
227 cl = self.repo.changelog
227 cl = self.repo.changelog
228 mf = cl.read(cl.tip())[0]
228 mf = cl.read(cl.tip())[0]
229 count = cl.count()
229 count = cl.count()
230 start = max(0, pos - maxchanges + 1)
230 start = max(0, pos - maxchanges + 1)
231 end = min(count, start + maxchanges)
231 end = min(count, start + maxchanges)
232 pos = end - 1
232 pos = end - 1
233
233
234 yield self.t(shortlog and 'shortlog' or 'changelog',
234 yield self.t(shortlog and 'shortlog' or 'changelog',
235 changenav=changenav,
235 changenav=changenav,
236 manifest=hex(mf),
236 manifest=hex(mf),
237 rev=pos, changesets=count, entries=changelist,
237 rev=pos, changesets=count, entries=changelist,
238 archives=self.archivelist("tip"))
238 archives=self.archivelist("tip"))
239
239
240 def search(self, query):
240 def search(self, query):
241
241
242 def changelist(**map):
242 def changelist(**map):
243 cl = self.repo.changelog
243 cl = self.repo.changelog
244 count = 0
244 count = 0
245 qw = query.lower().split()
245 qw = query.lower().split()
246
246
247 def revgen():
247 def revgen():
248 for i in range(cl.count() - 1, 0, -100):
248 for i in range(cl.count() - 1, 0, -100):
249 l = []
249 l = []
250 for j in range(max(0, i - 100), i):
250 for j in range(max(0, i - 100), i):
251 n = cl.node(j)
251 n = cl.node(j)
252 changes = cl.read(n)
252 changes = cl.read(n)
253 l.append((n, j, changes))
253 l.append((n, j, changes))
254 l.reverse()
254 l.reverse()
255 for e in l:
255 for e in l:
256 yield e
256 yield e
257
257
258 for n, i, changes in revgen():
258 for n, i, changes in revgen():
259 miss = 0
259 miss = 0
260 for q in qw:
260 for q in qw:
261 if not (q in changes[1].lower() or
261 if not (q in changes[1].lower() or
262 q in changes[4].lower() or
262 q in changes[4].lower() or
263 q in " ".join(changes[3][:20]).lower()):
263 q in " ".join(changes[3][:20]).lower()):
264 miss = 1
264 miss = 1
265 break
265 break
266 if miss:
266 if miss:
267 continue
267 continue
268
268
269 count += 1
269 count += 1
270 hn = hex(n)
270 hn = hex(n)
271
271
272 yield self.t('searchentry',
272 yield self.t('searchentry',
273 parity=self.stripes(count),
273 parity=self.stripes(count),
274 author=changes[1],
274 author=changes[1],
275 parent=self.siblings(cl.parents(n), cl.rev),
275 parent=self.siblings(cl.parents(n), cl.rev),
276 child=self.siblings(cl.children(n), cl.rev),
276 child=self.siblings(cl.children(n), cl.rev),
277 changelogtag=self.showtag("changelogtag",n),
277 changelogtag=self.showtag("changelogtag",n),
278 manifest=hex(changes[0]),
278 manifest=hex(changes[0]),
279 desc=changes[4],
279 desc=changes[4],
280 date=changes[2],
280 date=changes[2],
281 files=self.listfilediffs(changes[3], n),
281 files=self.listfilediffs(changes[3], n),
282 rev=i,
282 rev=i,
283 node=hn)
283 node=hn)
284
284
285 if count >= self.maxchanges:
285 if count >= self.maxchanges:
286 break
286 break
287
287
288 cl = self.repo.changelog
288 cl = self.repo.changelog
289 mf = cl.read(cl.tip())[0]
289 mf = cl.read(cl.tip())[0]
290
290
291 yield self.t('search',
291 yield self.t('search',
292 query=query,
292 query=query,
293 manifest=hex(mf),
293 manifest=hex(mf),
294 entries=changelist)
294 entries=changelist)
295
295
296 def changeset(self, nodeid):
296 def changeset(self, nodeid):
297 cl = self.repo.changelog
297 cl = self.repo.changelog
298 n = self.repo.lookup(nodeid)
298 n = self.repo.lookup(nodeid)
299 nodeid = hex(n)
299 nodeid = hex(n)
300 changes = cl.read(n)
300 changes = cl.read(n)
301 p1 = cl.parents(n)[0]
301 p1 = cl.parents(n)[0]
302
302
303 files = []
303 files = []
304 mf = self.repo.manifest.read(changes[0])
304 mf = self.repo.manifest.read(changes[0])
305 for f in changes[3]:
305 for f in changes[3]:
306 files.append(self.t("filenodelink",
306 files.append(self.t("filenodelink",
307 filenode=hex(mf.get(f, nullid)), file=f))
307 filenode=hex(mf.get(f, nullid)), file=f))
308
308
309 def diff(**map):
309 def diff(**map):
310 yield self.diff(p1, n, None)
310 yield self.diff(p1, n, None)
311
311
312 yield self.t('changeset',
312 yield self.t('changeset',
313 diff=diff,
313 diff=diff,
314 rev=cl.rev(n),
314 rev=cl.rev(n),
315 node=nodeid,
315 node=nodeid,
316 parent=self.siblings(cl.parents(n), cl.rev),
316 parent=self.siblings(cl.parents(n), cl.rev),
317 child=self.siblings(cl.children(n), cl.rev),
317 child=self.siblings(cl.children(n), cl.rev),
318 changesettag=self.showtag("changesettag",n),
318 changesettag=self.showtag("changesettag",n),
319 manifest=hex(changes[0]),
319 manifest=hex(changes[0]),
320 author=changes[1],
320 author=changes[1],
321 desc=changes[4],
321 desc=changes[4],
322 date=changes[2],
322 date=changes[2],
323 files=files,
323 files=files,
324 archives=self.archivelist(nodeid))
324 archives=self.archivelist(nodeid))
325
325
326 def filelog(self, f, filenode):
326 def filelog(self, f, filenode):
327 cl = self.repo.changelog
327 cl = self.repo.changelog
328 fl = self.repo.file(f)
328 fl = self.repo.file(f)
329 filenode = hex(fl.lookup(filenode))
329 filenode = hex(fl.lookup(filenode))
330 count = fl.count()
330 count = fl.count()
331
331
332 def entries(**map):
332 def entries(**map):
333 l = []
333 l = []
334 parity = (count - 1) & 1
334 parity = (count - 1) & 1
335
335
336 for i in range(count):
336 for i in range(count):
337 n = fl.node(i)
337 n = fl.node(i)
338 lr = fl.linkrev(n)
338 lr = fl.linkrev(n)
339 cn = cl.node(lr)
339 cn = cl.node(lr)
340 cs = cl.read(cl.node(lr))
340 cs = cl.read(cl.node(lr))
341
341
342 l.insert(0, {"parity": parity,
342 l.insert(0, {"parity": parity,
343 "filenode": hex(n),
343 "filenode": hex(n),
344 "filerev": i,
344 "filerev": i,
345 "file": f,
345 "file": f,
346 "node": hex(cn),
346 "node": hex(cn),
347 "author": cs[1],
347 "author": cs[1],
348 "date": cs[2],
348 "date": cs[2],
349 "rename": self.renamelink(fl, n),
349 "rename": self.renamelink(fl, n),
350 "parent": self.siblings(fl.parents(n),
350 "parent": self.siblings(fl.parents(n),
351 fl.rev, file=f),
351 fl.rev, file=f),
352 "child": self.siblings(fl.children(n),
352 "child": self.siblings(fl.children(n),
353 fl.rev, file=f),
353 fl.rev, file=f),
354 "desc": cs[4]})
354 "desc": cs[4]})
355 parity = 1 - parity
355 parity = 1 - parity
356
356
357 for e in l:
357 for e in l:
358 yield e
358 yield e
359
359
360 yield self.t("filelog", file=f, filenode=filenode, entries=entries)
360 yield self.t("filelog", file=f, filenode=filenode, entries=entries)
361
361
362 def filerevision(self, f, node):
362 def filerevision(self, f, node):
363 fl = self.repo.file(f)
363 fl = self.repo.file(f)
364 n = fl.lookup(node)
364 n = fl.lookup(node)
365 node = hex(n)
365 node = hex(n)
366 text = fl.read(n)
366 text = fl.read(n)
367 changerev = fl.linkrev(n)
367 changerev = fl.linkrev(n)
368 cl = self.repo.changelog
368 cl = self.repo.changelog
369 cn = cl.node(changerev)
369 cn = cl.node(changerev)
370 cs = cl.read(cn)
370 cs = cl.read(cn)
371 mfn = cs[0]
371 mfn = cs[0]
372
372
373 mt = mimetypes.guess_type(f)[0]
373 mt = mimetypes.guess_type(f)[0]
374 rawtext = text
374 rawtext = text
375 if util.binary(text):
375 if util.binary(text):
376 mt = mt or 'application/octet-stream'
376 mt = mt or 'application/octet-stream'
377 text = "(binary:%s)" % mt
377 text = "(binary:%s)" % mt
378 mt = mt or 'text/plain'
378 mt = mt or 'text/plain'
379
379
380 def lines():
380 def lines():
381 for l, t in enumerate(text.splitlines(1)):
381 for l, t in enumerate(text.splitlines(1)):
382 yield {"line": t,
382 yield {"line": t,
383 "linenumber": "% 6d" % (l + 1),
383 "linenumber": "% 6d" % (l + 1),
384 "parity": self.stripes(l)}
384 "parity": self.stripes(l)}
385
385
386 yield self.t("filerevision",
386 yield self.t("filerevision",
387 file=f,
387 file=f,
388 filenode=node,
388 filenode=node,
389 path=_up(f),
389 path=_up(f),
390 text=lines(),
390 text=lines(),
391 raw=rawtext,
391 raw=rawtext,
392 mimetype=mt,
392 mimetype=mt,
393 rev=changerev,
393 rev=changerev,
394 node=hex(cn),
394 node=hex(cn),
395 manifest=hex(mfn),
395 manifest=hex(mfn),
396 author=cs[1],
396 author=cs[1],
397 date=cs[2],
397 date=cs[2],
398 parent=self.siblings(fl.parents(n), fl.rev, file=f),
398 parent=self.siblings(fl.parents(n), fl.rev, file=f),
399 child=self.siblings(fl.children(n), fl.rev, file=f),
399 child=self.siblings(fl.children(n), fl.rev, file=f),
400 rename=self.renamelink(fl, n),
400 rename=self.renamelink(fl, n),
401 permissions=self.repo.manifest.read(mfn).execf[f])
401 permissions=self.repo.manifest.read(mfn).execf(f))
402
402
403 def fileannotate(self, f, node):
403 def fileannotate(self, f, node):
404 bcache = {}
404 bcache = {}
405 ncache = {}
405 ncache = {}
406 fl = self.repo.file(f)
406 fl = self.repo.file(f)
407 n = fl.lookup(node)
407 n = fl.lookup(node)
408 node = hex(n)
408 node = hex(n)
409 changerev = fl.linkrev(n)
409 changerev = fl.linkrev(n)
410
410
411 cl = self.repo.changelog
411 cl = self.repo.changelog
412 cn = cl.node(changerev)
412 cn = cl.node(changerev)
413 cs = cl.read(cn)
413 cs = cl.read(cn)
414 mfn = cs[0]
414 mfn = cs[0]
415
415
416 def annotate(**map):
416 def annotate(**map):
417 parity = 0
417 parity = 0
418 last = None
418 last = None
419 for r, l in fl.annotate(n):
419 for r, l in fl.annotate(n):
420 try:
420 try:
421 cnode = ncache[r]
421 cnode = ncache[r]
422 except KeyError:
422 except KeyError:
423 cnode = ncache[r] = self.repo.changelog.node(r)
423 cnode = ncache[r] = self.repo.changelog.node(r)
424
424
425 try:
425 try:
426 name = bcache[r]
426 name = bcache[r]
427 except KeyError:
427 except KeyError:
428 cl = self.repo.changelog.read(cnode)
428 cl = self.repo.changelog.read(cnode)
429 bcache[r] = name = self.repo.ui.shortuser(cl[1])
429 bcache[r] = name = self.repo.ui.shortuser(cl[1])
430
430
431 if last != cnode:
431 if last != cnode:
432 parity = 1 - parity
432 parity = 1 - parity
433 last = cnode
433 last = cnode
434
434
435 yield {"parity": parity,
435 yield {"parity": parity,
436 "node": hex(cnode),
436 "node": hex(cnode),
437 "rev": r,
437 "rev": r,
438 "author": name,
438 "author": name,
439 "file": f,
439 "file": f,
440 "line": l}
440 "line": l}
441
441
442 yield self.t("fileannotate",
442 yield self.t("fileannotate",
443 file=f,
443 file=f,
444 filenode=node,
444 filenode=node,
445 annotate=annotate,
445 annotate=annotate,
446 path=_up(f),
446 path=_up(f),
447 rev=changerev,
447 rev=changerev,
448 node=hex(cn),
448 node=hex(cn),
449 manifest=hex(mfn),
449 manifest=hex(mfn),
450 author=cs[1],
450 author=cs[1],
451 date=cs[2],
451 date=cs[2],
452 rename=self.renamelink(fl, n),
452 rename=self.renamelink(fl, n),
453 parent=self.siblings(fl.parents(n), fl.rev, file=f),
453 parent=self.siblings(fl.parents(n), fl.rev, file=f),
454 child=self.siblings(fl.children(n), fl.rev, file=f),
454 child=self.siblings(fl.children(n), fl.rev, file=f),
455 permissions=self.repo.manifest.read(mfn).execf[f])
455 permissions=self.repo.manifest.read(mfn).execf(f))
456
456
457 def manifest(self, mnode, path):
457 def manifest(self, mnode, path):
458 man = self.repo.manifest
458 man = self.repo.manifest
459 mn = man.lookup(mnode)
459 mn = man.lookup(mnode)
460 mnode = hex(mn)
460 mnode = hex(mn)
461 mf = man.read(mn)
461 mf = man.read(mn)
462 rev = man.rev(mn)
462 rev = man.rev(mn)
463 changerev = man.linkrev(mn)
463 changerev = man.linkrev(mn)
464 node = self.repo.changelog.node(changerev)
464 node = self.repo.changelog.node(changerev)
465
465
466 files = {}
466 files = {}
467
467
468 p = path[1:]
468 p = path[1:]
469 if p and p[-1] != "/":
469 if p and p[-1] != "/":
470 p += "/"
470 p += "/"
471 l = len(p)
471 l = len(p)
472
472
473 for f,n in mf.items():
473 for f,n in mf.items():
474 if f[:l] != p:
474 if f[:l] != p:
475 continue
475 continue
476 remain = f[l:]
476 remain = f[l:]
477 if "/" in remain:
477 if "/" in remain:
478 short = remain[:remain.index("/") + 1] # bleah
478 short = remain[:remain.index("/") + 1] # bleah
479 files[short] = (f, None)
479 files[short] = (f, None)
480 else:
480 else:
481 short = os.path.basename(remain)
481 short = os.path.basename(remain)
482 files[short] = (f, n)
482 files[short] = (f, n)
483
483
484 def filelist(**map):
484 def filelist(**map):
485 parity = 0
485 parity = 0
486 fl = files.keys()
486 fl = files.keys()
487 fl.sort()
487 fl.sort()
488 for f in fl:
488 for f in fl:
489 full, fnode = files[f]
489 full, fnode = files[f]
490 if not fnode:
490 if not fnode:
491 continue
491 continue
492
492
493 yield {"file": full,
493 yield {"file": full,
494 "manifest": mnode,
494 "manifest": mnode,
495 "filenode": hex(fnode),
495 "filenode": hex(fnode),
496 "parity": self.stripes(parity),
496 "parity": self.stripes(parity),
497 "basename": f,
497 "basename": f,
498 "permissions": mf.execf[full]}
498 "permissions": mf.execf(full)}
499 parity += 1
499 parity += 1
500
500
501 def dirlist(**map):
501 def dirlist(**map):
502 parity = 0
502 parity = 0
503 fl = files.keys()
503 fl = files.keys()
504 fl.sort()
504 fl.sort()
505 for f in fl:
505 for f in fl:
506 full, fnode = files[f]
506 full, fnode = files[f]
507 if fnode:
507 if fnode:
508 continue
508 continue
509
509
510 yield {"parity": self.stripes(parity),
510 yield {"parity": self.stripes(parity),
511 "path": os.path.join(path, f),
511 "path": os.path.join(path, f),
512 "manifest": mnode,
512 "manifest": mnode,
513 "basename": f[:-1]}
513 "basename": f[:-1]}
514 parity += 1
514 parity += 1
515
515
516 yield self.t("manifest",
516 yield self.t("manifest",
517 manifest=mnode,
517 manifest=mnode,
518 rev=rev,
518 rev=rev,
519 node=hex(node),
519 node=hex(node),
520 path=path,
520 path=path,
521 up=_up(path),
521 up=_up(path),
522 fentries=filelist,
522 fentries=filelist,
523 dentries=dirlist,
523 dentries=dirlist,
524 archives=self.archivelist(hex(node)))
524 archives=self.archivelist(hex(node)))
525
525
526 def tags(self):
526 def tags(self):
527 cl = self.repo.changelog
527 cl = self.repo.changelog
528 mf = cl.read(cl.tip())[0]
528 mf = cl.read(cl.tip())[0]
529
529
530 i = self.repo.tagslist()
530 i = self.repo.tagslist()
531 i.reverse()
531 i.reverse()
532
532
533 def entries(notip=False, **map):
533 def entries(notip=False, **map):
534 parity = 0
534 parity = 0
535 for k,n in i:
535 for k,n in i:
536 if notip and k == "tip": continue
536 if notip and k == "tip": continue
537 yield {"parity": self.stripes(parity),
537 yield {"parity": self.stripes(parity),
538 "tag": k,
538 "tag": k,
539 "tagmanifest": hex(cl.read(n)[0]),
539 "tagmanifest": hex(cl.read(n)[0]),
540 "date": cl.read(n)[2],
540 "date": cl.read(n)[2],
541 "node": hex(n)}
541 "node": hex(n)}
542 parity += 1
542 parity += 1
543
543
544 yield self.t("tags",
544 yield self.t("tags",
545 manifest=hex(mf),
545 manifest=hex(mf),
546 entries=lambda **x: entries(False, **x),
546 entries=lambda **x: entries(False, **x),
547 entriesnotip=lambda **x: entries(True, **x))
547 entriesnotip=lambda **x: entries(True, **x))
548
548
549 def summary(self):
549 def summary(self):
550 cl = self.repo.changelog
550 cl = self.repo.changelog
551 mf = cl.read(cl.tip())[0]
551 mf = cl.read(cl.tip())[0]
552
552
553 i = self.repo.tagslist()
553 i = self.repo.tagslist()
554 i.reverse()
554 i.reverse()
555
555
556 def tagentries(**map):
556 def tagentries(**map):
557 parity = 0
557 parity = 0
558 count = 0
558 count = 0
559 for k,n in i:
559 for k,n in i:
560 if k == "tip": # skip tip
560 if k == "tip": # skip tip
561 continue;
561 continue;
562
562
563 count += 1
563 count += 1
564 if count > 10: # limit to 10 tags
564 if count > 10: # limit to 10 tags
565 break;
565 break;
566
566
567 c = cl.read(n)
567 c = cl.read(n)
568 m = c[0]
568 m = c[0]
569 t = c[2]
569 t = c[2]
570
570
571 yield self.t("tagentry",
571 yield self.t("tagentry",
572 parity = self.stripes(parity),
572 parity = self.stripes(parity),
573 tag = k,
573 tag = k,
574 node = hex(n),
574 node = hex(n),
575 date = t,
575 date = t,
576 tagmanifest = hex(m))
576 tagmanifest = hex(m))
577 parity += 1
577 parity += 1
578
578
579 def changelist(**map):
579 def changelist(**map):
580 parity = 0
580 parity = 0
581 cl = self.repo.changelog
581 cl = self.repo.changelog
582 l = [] # build a list in forward order for efficiency
582 l = [] # build a list in forward order for efficiency
583 for i in range(start, end):
583 for i in range(start, end):
584 n = cl.node(i)
584 n = cl.node(i)
585 changes = cl.read(n)
585 changes = cl.read(n)
586 hn = hex(n)
586 hn = hex(n)
587 t = changes[2]
587 t = changes[2]
588
588
589 l.insert(0, self.t(
589 l.insert(0, self.t(
590 'shortlogentry',
590 'shortlogentry',
591 parity = parity,
591 parity = parity,
592 author = changes[1],
592 author = changes[1],
593 manifest = hex(changes[0]),
593 manifest = hex(changes[0]),
594 desc = changes[4],
594 desc = changes[4],
595 date = t,
595 date = t,
596 rev = i,
596 rev = i,
597 node = hn))
597 node = hn))
598 parity = 1 - parity
598 parity = 1 - parity
599
599
600 yield l
600 yield l
601
601
602 cl = self.repo.changelog
602 cl = self.repo.changelog
603 mf = cl.read(cl.tip())[0]
603 mf = cl.read(cl.tip())[0]
604 count = cl.count()
604 count = cl.count()
605 start = max(0, count - self.maxchanges)
605 start = max(0, count - self.maxchanges)
606 end = min(count, start + self.maxchanges)
606 end = min(count, start + self.maxchanges)
607
607
608 yield self.t("summary",
608 yield self.t("summary",
609 desc = self.repo.ui.config("web", "description", "unknown"),
609 desc = self.repo.ui.config("web", "description", "unknown"),
610 owner = (self.repo.ui.config("ui", "username") or # preferred
610 owner = (self.repo.ui.config("ui", "username") or # preferred
611 self.repo.ui.config("web", "contact") or # deprecated
611 self.repo.ui.config("web", "contact") or # deprecated
612 self.repo.ui.config("web", "author", "unknown")), # also
612 self.repo.ui.config("web", "author", "unknown")), # also
613 lastchange = (0, 0), # FIXME
613 lastchange = (0, 0), # FIXME
614 manifest = hex(mf),
614 manifest = hex(mf),
615 tags = tagentries,
615 tags = tagentries,
616 shortlog = changelist,
616 shortlog = changelist,
617 archives=self.archivelist("tip"))
617 archives=self.archivelist("tip"))
618
618
619 def filediff(self, file, changeset):
619 def filediff(self, file, changeset):
620 cl = self.repo.changelog
620 cl = self.repo.changelog
621 n = self.repo.lookup(changeset)
621 n = self.repo.lookup(changeset)
622 changeset = hex(n)
622 changeset = hex(n)
623 p1 = cl.parents(n)[0]
623 p1 = cl.parents(n)[0]
624 cs = cl.read(n)
624 cs = cl.read(n)
625 mf = self.repo.manifest.read(cs[0])
625 mf = self.repo.manifest.read(cs[0])
626
626
627 def diff(**map):
627 def diff(**map):
628 yield self.diff(p1, n, [file])
628 yield self.diff(p1, n, [file])
629
629
630 yield self.t("filediff",
630 yield self.t("filediff",
631 file=file,
631 file=file,
632 filenode=hex(mf.get(file, nullid)),
632 filenode=hex(mf.get(file, nullid)),
633 node=changeset,
633 node=changeset,
634 rev=self.repo.changelog.rev(n),
634 rev=self.repo.changelog.rev(n),
635 parent=self.siblings(cl.parents(n), cl.rev),
635 parent=self.siblings(cl.parents(n), cl.rev),
636 child=self.siblings(cl.children(n), cl.rev),
636 child=self.siblings(cl.children(n), cl.rev),
637 diff=diff)
637 diff=diff)
638
638
639 archive_specs = {
639 archive_specs = {
640 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
640 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
641 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
641 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
642 'zip': ('application/zip', 'zip', '.zip', None),
642 'zip': ('application/zip', 'zip', '.zip', None),
643 }
643 }
644
644
645 def archive(self, req, cnode, type_):
645 def archive(self, req, cnode, type_):
646 reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame))
646 reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame))
647 name = "%s-%s" % (reponame, short(cnode))
647 name = "%s-%s" % (reponame, short(cnode))
648 mimetype, artype, extension, encoding = self.archive_specs[type_]
648 mimetype, artype, extension, encoding = self.archive_specs[type_]
649 headers = [('Content-type', mimetype),
649 headers = [('Content-type', mimetype),
650 ('Content-disposition', 'attachment; filename=%s%s' %
650 ('Content-disposition', 'attachment; filename=%s%s' %
651 (name, extension))]
651 (name, extension))]
652 if encoding:
652 if encoding:
653 headers.append(('Content-encoding', encoding))
653 headers.append(('Content-encoding', encoding))
654 req.header(headers)
654 req.header(headers)
655 archival.archive(self.repo, req.out, cnode, artype, prefix=name)
655 archival.archive(self.repo, req.out, cnode, artype, prefix=name)
656
656
657 # add tags to things
657 # add tags to things
658 # tags -> list of changesets corresponding to tags
658 # tags -> list of changesets corresponding to tags
659 # find tag, changeset, file
659 # find tag, changeset, file
660
660
661 def cleanpath(self, path):
661 def cleanpath(self, path):
662 p = util.normpath(path)
662 p = util.normpath(path)
663 if p[:2] == "..":
663 if p[:2] == "..":
664 raise Exception("suspicious path")
664 raise Exception("suspicious path")
665 return p
665 return p
666
666
667 def run(self):
667 def run(self):
668 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
668 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
669 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
669 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
670 import mercurial.hgweb.wsgicgi as wsgicgi
670 import mercurial.hgweb.wsgicgi as wsgicgi
671 from request import wsgiapplication
671 from request import wsgiapplication
672 def make_web_app():
672 def make_web_app():
673 return self
673 return self
674 wsgicgi.launch(wsgiapplication(make_web_app))
674 wsgicgi.launch(wsgiapplication(make_web_app))
675
675
676 def run_wsgi(self, req):
676 def run_wsgi(self, req):
677 def header(**map):
677 def header(**map):
678 header_file = cStringIO.StringIO(''.join(self.t("header", **map)))
678 header_file = cStringIO.StringIO(''.join(self.t("header", **map)))
679 msg = mimetools.Message(header_file, 0)
679 msg = mimetools.Message(header_file, 0)
680 req.header(msg.items())
680 req.header(msg.items())
681 yield header_file.read()
681 yield header_file.read()
682
682
683 def rawfileheader(**map):
683 def rawfileheader(**map):
684 req.header([('Content-type', map['mimetype']),
684 req.header([('Content-type', map['mimetype']),
685 ('Content-disposition', 'filename=%s' % map['file']),
685 ('Content-disposition', 'filename=%s' % map['file']),
686 ('Content-length', str(len(map['raw'])))])
686 ('Content-length', str(len(map['raw'])))])
687 yield ''
687 yield ''
688
688
689 def footer(**map):
689 def footer(**map):
690 yield self.t("footer",
690 yield self.t("footer",
691 motd=self.repo.ui.config("web", "motd", ""),
691 motd=self.repo.ui.config("web", "motd", ""),
692 **map)
692 **map)
693
693
694 def expand_form(form):
694 def expand_form(form):
695 shortcuts = {
695 shortcuts = {
696 'cl': [('cmd', ['changelog']), ('rev', None)],
696 'cl': [('cmd', ['changelog']), ('rev', None)],
697 'sl': [('cmd', ['shortlog']), ('rev', None)],
697 'sl': [('cmd', ['shortlog']), ('rev', None)],
698 'cs': [('cmd', ['changeset']), ('node', None)],
698 'cs': [('cmd', ['changeset']), ('node', None)],
699 'f': [('cmd', ['file']), ('filenode', None)],
699 'f': [('cmd', ['file']), ('filenode', None)],
700 'fl': [('cmd', ['filelog']), ('filenode', None)],
700 'fl': [('cmd', ['filelog']), ('filenode', None)],
701 'fd': [('cmd', ['filediff']), ('node', None)],
701 'fd': [('cmd', ['filediff']), ('node', None)],
702 'fa': [('cmd', ['annotate']), ('filenode', None)],
702 'fa': [('cmd', ['annotate']), ('filenode', None)],
703 'mf': [('cmd', ['manifest']), ('manifest', None)],
703 'mf': [('cmd', ['manifest']), ('manifest', None)],
704 'ca': [('cmd', ['archive']), ('node', None)],
704 'ca': [('cmd', ['archive']), ('node', None)],
705 'tags': [('cmd', ['tags'])],
705 'tags': [('cmd', ['tags'])],
706 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
706 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
707 'static': [('cmd', ['static']), ('file', None)]
707 'static': [('cmd', ['static']), ('file', None)]
708 }
708 }
709
709
710 for k in shortcuts.iterkeys():
710 for k in shortcuts.iterkeys():
711 if form.has_key(k):
711 if form.has_key(k):
712 for name, value in shortcuts[k]:
712 for name, value in shortcuts[k]:
713 if value is None:
713 if value is None:
714 value = form[k]
714 value = form[k]
715 form[name] = value
715 form[name] = value
716 del form[k]
716 del form[k]
717
717
718 self.refresh()
718 self.refresh()
719
719
720 expand_form(req.form)
720 expand_form(req.form)
721
721
722 m = os.path.join(self.templatepath, "map")
722 m = os.path.join(self.templatepath, "map")
723 style = self.repo.ui.config("web", "style", "")
723 style = self.repo.ui.config("web", "style", "")
724 if req.form.has_key('style'):
724 if req.form.has_key('style'):
725 style = req.form['style'][0]
725 style = req.form['style'][0]
726 if style:
726 if style:
727 b = os.path.basename("map-" + style)
727 b = os.path.basename("map-" + style)
728 p = os.path.join(self.templatepath, b)
728 p = os.path.join(self.templatepath, b)
729 if os.path.isfile(p):
729 if os.path.isfile(p):
730 m = p
730 m = p
731
731
732 port = req.env["SERVER_PORT"]
732 port = req.env["SERVER_PORT"]
733 port = port != "80" and (":" + port) or ""
733 port = port != "80" and (":" + port) or ""
734 uri = req.env["REQUEST_URI"]
734 uri = req.env["REQUEST_URI"]
735 if "?" in uri:
735 if "?" in uri:
736 uri = uri.split("?")[0]
736 uri = uri.split("?")[0]
737 url = "http://%s%s%s" % (req.env["SERVER_NAME"], port, uri)
737 url = "http://%s%s%s" % (req.env["SERVER_NAME"], port, uri)
738 if not self.reponame:
738 if not self.reponame:
739 self.reponame = (self.repo.ui.config("web", "name")
739 self.reponame = (self.repo.ui.config("web", "name")
740 or uri.strip('/') or self.repo.root)
740 or uri.strip('/') or self.repo.root)
741
741
742 self.t = templater.templater(m, templater.common_filters,
742 self.t = templater.templater(m, templater.common_filters,
743 defaults={"url": url,
743 defaults={"url": url,
744 "repo": self.reponame,
744 "repo": self.reponame,
745 "header": header,
745 "header": header,
746 "footer": footer,
746 "footer": footer,
747 "rawfileheader": rawfileheader,
747 "rawfileheader": rawfileheader,
748 })
748 })
749
749
750 if not req.form.has_key('cmd'):
750 if not req.form.has_key('cmd'):
751 req.form['cmd'] = [self.t.cache['default'],]
751 req.form['cmd'] = [self.t.cache['default'],]
752
752
753 cmd = req.form['cmd'][0]
753 cmd = req.form['cmd'][0]
754
754
755 method = getattr(self, 'do_' + cmd, None)
755 method = getattr(self, 'do_' + cmd, None)
756 if method:
756 if method:
757 method(req)
757 method(req)
758 else:
758 else:
759 req.write(self.t("error"))
759 req.write(self.t("error"))
760
760
761 def stripes(self, parity):
761 def stripes(self, parity):
762 "make horizontal stripes for easier reading"
762 "make horizontal stripes for easier reading"
763 if self.stripecount:
763 if self.stripecount:
764 return (1 + parity / self.stripecount) & 1
764 return (1 + parity / self.stripecount) & 1
765 else:
765 else:
766 return 0
766 return 0
767
767
768 def do_changelog(self, req):
768 def do_changelog(self, req):
769 hi = self.repo.changelog.count() - 1
769 hi = self.repo.changelog.count() - 1
770 if req.form.has_key('rev'):
770 if req.form.has_key('rev'):
771 hi = req.form['rev'][0]
771 hi = req.form['rev'][0]
772 try:
772 try:
773 hi = self.repo.changelog.rev(self.repo.lookup(hi))
773 hi = self.repo.changelog.rev(self.repo.lookup(hi))
774 except hg.RepoError:
774 except hg.RepoError:
775 req.write(self.search(hi)) # XXX redirect to 404 page?
775 req.write(self.search(hi)) # XXX redirect to 404 page?
776 return
776 return
777
777
778 req.write(self.changelog(hi))
778 req.write(self.changelog(hi))
779
779
780 def do_shortlog(self, req):
780 def do_shortlog(self, req):
781 hi = self.repo.changelog.count() - 1
781 hi = self.repo.changelog.count() - 1
782 if req.form.has_key('rev'):
782 if req.form.has_key('rev'):
783 hi = req.form['rev'][0]
783 hi = req.form['rev'][0]
784 try:
784 try:
785 hi = self.repo.changelog.rev(self.repo.lookup(hi))
785 hi = self.repo.changelog.rev(self.repo.lookup(hi))
786 except hg.RepoError:
786 except hg.RepoError:
787 req.write(self.search(hi)) # XXX redirect to 404 page?
787 req.write(self.search(hi)) # XXX redirect to 404 page?
788 return
788 return
789
789
790 req.write(self.changelog(hi, shortlog = True))
790 req.write(self.changelog(hi, shortlog = True))
791
791
792 def do_changeset(self, req):
792 def do_changeset(self, req):
793 req.write(self.changeset(req.form['node'][0]))
793 req.write(self.changeset(req.form['node'][0]))
794
794
795 def do_manifest(self, req):
795 def do_manifest(self, req):
796 req.write(self.manifest(req.form['manifest'][0],
796 req.write(self.manifest(req.form['manifest'][0],
797 self.cleanpath(req.form['path'][0])))
797 self.cleanpath(req.form['path'][0])))
798
798
799 def do_tags(self, req):
799 def do_tags(self, req):
800 req.write(self.tags())
800 req.write(self.tags())
801
801
802 def do_summary(self, req):
802 def do_summary(self, req):
803 req.write(self.summary())
803 req.write(self.summary())
804
804
805 def do_filediff(self, req):
805 def do_filediff(self, req):
806 req.write(self.filediff(self.cleanpath(req.form['file'][0]),
806 req.write(self.filediff(self.cleanpath(req.form['file'][0]),
807 req.form['node'][0]))
807 req.form['node'][0]))
808
808
809 def do_file(self, req):
809 def do_file(self, req):
810 req.write(self.filerevision(self.cleanpath(req.form['file'][0]),
810 req.write(self.filerevision(self.cleanpath(req.form['file'][0]),
811 req.form['filenode'][0]))
811 req.form['filenode'][0]))
812
812
813 def do_annotate(self, req):
813 def do_annotate(self, req):
814 req.write(self.fileannotate(self.cleanpath(req.form['file'][0]),
814 req.write(self.fileannotate(self.cleanpath(req.form['file'][0]),
815 req.form['filenode'][0]))
815 req.form['filenode'][0]))
816
816
817 def do_filelog(self, req):
817 def do_filelog(self, req):
818 req.write(self.filelog(self.cleanpath(req.form['file'][0]),
818 req.write(self.filelog(self.cleanpath(req.form['file'][0]),
819 req.form['filenode'][0]))
819 req.form['filenode'][0]))
820
820
821 def do_heads(self, req):
821 def do_heads(self, req):
822 resp = " ".join(map(hex, self.repo.heads())) + "\n"
822 resp = " ".join(map(hex, self.repo.heads())) + "\n"
823 req.httphdr("application/mercurial-0.1", length=len(resp))
823 req.httphdr("application/mercurial-0.1", length=len(resp))
824 req.write(resp)
824 req.write(resp)
825
825
826 def do_branches(self, req):
826 def do_branches(self, req):
827 nodes = []
827 nodes = []
828 if req.form.has_key('nodes'):
828 if req.form.has_key('nodes'):
829 nodes = map(bin, req.form['nodes'][0].split(" "))
829 nodes = map(bin, req.form['nodes'][0].split(" "))
830 resp = cStringIO.StringIO()
830 resp = cStringIO.StringIO()
831 for b in self.repo.branches(nodes):
831 for b in self.repo.branches(nodes):
832 resp.write(" ".join(map(hex, b)) + "\n")
832 resp.write(" ".join(map(hex, b)) + "\n")
833 resp = resp.getvalue()
833 resp = resp.getvalue()
834 req.httphdr("application/mercurial-0.1", length=len(resp))
834 req.httphdr("application/mercurial-0.1", length=len(resp))
835 req.write(resp)
835 req.write(resp)
836
836
837 def do_between(self, req):
837 def do_between(self, req):
838 nodes = []
838 nodes = []
839 if req.form.has_key('pairs'):
839 if req.form.has_key('pairs'):
840 pairs = [map(bin, p.split("-"))
840 pairs = [map(bin, p.split("-"))
841 for p in req.form['pairs'][0].split(" ")]
841 for p in req.form['pairs'][0].split(" ")]
842 resp = cStringIO.StringIO()
842 resp = cStringIO.StringIO()
843 for b in self.repo.between(pairs):
843 for b in self.repo.between(pairs):
844 resp.write(" ".join(map(hex, b)) + "\n")
844 resp.write(" ".join(map(hex, b)) + "\n")
845 resp = resp.getvalue()
845 resp = resp.getvalue()
846 req.httphdr("application/mercurial-0.1", length=len(resp))
846 req.httphdr("application/mercurial-0.1", length=len(resp))
847 req.write(resp)
847 req.write(resp)
848
848
849 def do_changegroup(self, req):
849 def do_changegroup(self, req):
850 req.httphdr("application/mercurial-0.1")
850 req.httphdr("application/mercurial-0.1")
851 nodes = []
851 nodes = []
852 if not self.allowpull:
852 if not self.allowpull:
853 return
853 return
854
854
855 if req.form.has_key('roots'):
855 if req.form.has_key('roots'):
856 nodes = map(bin, req.form['roots'][0].split(" "))
856 nodes = map(bin, req.form['roots'][0].split(" "))
857
857
858 z = zlib.compressobj()
858 z = zlib.compressobj()
859 f = self.repo.changegroup(nodes, 'serve')
859 f = self.repo.changegroup(nodes, 'serve')
860 while 1:
860 while 1:
861 chunk = f.read(4096)
861 chunk = f.read(4096)
862 if not chunk:
862 if not chunk:
863 break
863 break
864 req.write(z.compress(chunk))
864 req.write(z.compress(chunk))
865
865
866 req.write(z.flush())
866 req.write(z.flush())
867
867
868 def do_archive(self, req):
868 def do_archive(self, req):
869 changeset = self.repo.lookup(req.form['node'][0])
869 changeset = self.repo.lookup(req.form['node'][0])
870 type_ = req.form['type'][0]
870 type_ = req.form['type'][0]
871 allowed = self.repo.ui.configlist("web", "allow_archive")
871 allowed = self.repo.ui.configlist("web", "allow_archive")
872 if (type_ in self.archives and (type_ in allowed or
872 if (type_ in self.archives and (type_ in allowed or
873 self.repo.ui.configbool("web", "allow" + type_, False))):
873 self.repo.ui.configbool("web", "allow" + type_, False))):
874 self.archive(req, changeset, type_)
874 self.archive(req, changeset, type_)
875 return
875 return
876
876
877 req.write(self.t("error"))
877 req.write(self.t("error"))
878
878
879 def do_static(self, req):
879 def do_static(self, req):
880 fname = req.form['file'][0]
880 fname = req.form['file'][0]
881 static = self.repo.ui.config("web", "static",
881 static = self.repo.ui.config("web", "static",
882 os.path.join(self.templatepath,
882 os.path.join(self.templatepath,
883 "static"))
883 "static"))
884 req.write(staticfile(static, fname, req)
884 req.write(staticfile(static, fname, req)
885 or self.t("error", error="%r not found" % fname))
885 or self.t("error", error="%r not found" % fname))
886
886
887 def do_capabilities(self, req):
887 def do_capabilities(self, req):
888 caps = ['unbundle']
888 caps = ['unbundle']
889 if self.repo.ui.configbool('server', 'uncompressed'):
889 if self.repo.ui.configbool('server', 'uncompressed'):
890 caps.append('stream=%d' % self.repo.revlogversion)
890 caps.append('stream=%d' % self.repo.revlogversion)
891 resp = ' '.join(caps)
891 resp = ' '.join(caps)
892 req.httphdr("application/mercurial-0.1", length=len(resp))
892 req.httphdr("application/mercurial-0.1", length=len(resp))
893 req.write(resp)
893 req.write(resp)
894
894
895 def check_perm(self, req, op, default):
895 def check_perm(self, req, op, default):
896 '''check permission for operation based on user auth.
896 '''check permission for operation based on user auth.
897 return true if op allowed, else false.
897 return true if op allowed, else false.
898 default is policy to use if no config given.'''
898 default is policy to use if no config given.'''
899
899
900 user = req.env.get('REMOTE_USER')
900 user = req.env.get('REMOTE_USER')
901
901
902 deny = self.repo.ui.configlist('web', 'deny_' + op)
902 deny = self.repo.ui.configlist('web', 'deny_' + op)
903 if deny and (not user or deny == ['*'] or user in deny):
903 if deny and (not user or deny == ['*'] or user in deny):
904 return False
904 return False
905
905
906 allow = self.repo.ui.configlist('web', 'allow_' + op)
906 allow = self.repo.ui.configlist('web', 'allow_' + op)
907 return (allow and (allow == ['*'] or user in allow)) or default
907 return (allow and (allow == ['*'] or user in allow)) or default
908
908
909 def do_unbundle(self, req):
909 def do_unbundle(self, req):
910 def bail(response, headers={}):
910 def bail(response, headers={}):
911 length = int(req.env['CONTENT_LENGTH'])
911 length = int(req.env['CONTENT_LENGTH'])
912 for s in util.filechunkiter(req, limit=length):
912 for s in util.filechunkiter(req, limit=length):
913 # drain incoming bundle, else client will not see
913 # drain incoming bundle, else client will not see
914 # response when run outside cgi script
914 # response when run outside cgi script
915 pass
915 pass
916 req.httphdr("application/mercurial-0.1", headers=headers)
916 req.httphdr("application/mercurial-0.1", headers=headers)
917 req.write('0\n')
917 req.write('0\n')
918 req.write(response)
918 req.write(response)
919
919
920 # require ssl by default, auth info cannot be sniffed and
920 # require ssl by default, auth info cannot be sniffed and
921 # replayed
921 # replayed
922 ssl_req = self.repo.ui.configbool('web', 'push_ssl', True)
922 ssl_req = self.repo.ui.configbool('web', 'push_ssl', True)
923 if ssl_req:
923 if ssl_req:
924 if not req.env.get('HTTPS'):
924 if not req.env.get('HTTPS'):
925 bail(_('ssl required\n'))
925 bail(_('ssl required\n'))
926 return
926 return
927 proto = 'https'
927 proto = 'https'
928 else:
928 else:
929 proto = 'http'
929 proto = 'http'
930
930
931 # do not allow push unless explicitly allowed
931 # do not allow push unless explicitly allowed
932 if not self.check_perm(req, 'push', False):
932 if not self.check_perm(req, 'push', False):
933 bail(_('push not authorized\n'),
933 bail(_('push not authorized\n'),
934 headers={'status': '401 Unauthorized'})
934 headers={'status': '401 Unauthorized'})
935 return
935 return
936
936
937 req.httphdr("application/mercurial-0.1")
937 req.httphdr("application/mercurial-0.1")
938
938
939 their_heads = req.form['heads'][0].split(' ')
939 their_heads = req.form['heads'][0].split(' ')
940
940
941 def check_heads():
941 def check_heads():
942 heads = map(hex, self.repo.heads())
942 heads = map(hex, self.repo.heads())
943 return their_heads == [hex('force')] or their_heads == heads
943 return their_heads == [hex('force')] or their_heads == heads
944
944
945 # fail early if possible
945 # fail early if possible
946 if not check_heads():
946 if not check_heads():
947 bail(_('unsynced changes\n'))
947 bail(_('unsynced changes\n'))
948 return
948 return
949
949
950 # do not lock repo until all changegroup data is
950 # do not lock repo until all changegroup data is
951 # streamed. save to temporary file.
951 # streamed. save to temporary file.
952
952
953 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
953 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
954 fp = os.fdopen(fd, 'wb+')
954 fp = os.fdopen(fd, 'wb+')
955 try:
955 try:
956 length = int(req.env['CONTENT_LENGTH'])
956 length = int(req.env['CONTENT_LENGTH'])
957 for s in util.filechunkiter(req, limit=length):
957 for s in util.filechunkiter(req, limit=length):
958 fp.write(s)
958 fp.write(s)
959
959
960 lock = self.repo.lock()
960 lock = self.repo.lock()
961 try:
961 try:
962 if not check_heads():
962 if not check_heads():
963 req.write('0\n')
963 req.write('0\n')
964 req.write(_('unsynced changes\n'))
964 req.write(_('unsynced changes\n'))
965 return
965 return
966
966
967 fp.seek(0)
967 fp.seek(0)
968
968
969 # send addchangegroup output to client
969 # send addchangegroup output to client
970
970
971 old_stdout = sys.stdout
971 old_stdout = sys.stdout
972 sys.stdout = cStringIO.StringIO()
972 sys.stdout = cStringIO.StringIO()
973
973
974 try:
974 try:
975 url = 'remote:%s:%s' % (proto,
975 url = 'remote:%s:%s' % (proto,
976 req.env.get('REMOTE_HOST', ''))
976 req.env.get('REMOTE_HOST', ''))
977 ret = self.repo.addchangegroup(fp, 'serve', url)
977 ret = self.repo.addchangegroup(fp, 'serve', url)
978 finally:
978 finally:
979 val = sys.stdout.getvalue()
979 val = sys.stdout.getvalue()
980 sys.stdout = old_stdout
980 sys.stdout = old_stdout
981 req.write('%d\n' % ret)
981 req.write('%d\n' % ret)
982 req.write(val)
982 req.write(val)
983 finally:
983 finally:
984 lock.release()
984 lock.release()
985 finally:
985 finally:
986 fp.close()
986 fp.close()
987 os.unlink(tempname)
987 os.unlink(tempname)
988
988
989 def do_stream_out(self, req):
989 def do_stream_out(self, req):
990 req.httphdr("application/mercurial-0.1")
990 req.httphdr("application/mercurial-0.1")
991 streamclone.stream_out(self.repo, req)
991 streamclone.stream_out(self.repo, req)
@@ -1,1758 +1,1757
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("no repo found"))
30 raise repo.RepoError(_("no repo found"))
31 path = p
31 path = p
32 self.path = os.path.join(path, ".hg")
32 self.path = os.path.join(path, ".hg")
33
33
34 if not create and not os.path.isdir(self.path):
34 if not create and not os.path.isdir(self.path):
35 raise repo.RepoError(_("repository %s not found") % path)
35 raise repo.RepoError(_("repository %s not found") % path)
36
36
37 self.root = os.path.abspath(path)
37 self.root = os.path.abspath(path)
38 self.origroot = path
38 self.origroot = path
39 self.ui = ui.ui(parentui=parentui)
39 self.ui = ui.ui(parentui=parentui)
40 self.opener = util.opener(self.path)
40 self.opener = util.opener(self.path)
41 self.wopener = util.opener(self.root)
41 self.wopener = util.opener(self.root)
42
42
43 try:
43 try:
44 self.ui.readconfig(self.join("hgrc"), self.root)
44 self.ui.readconfig(self.join("hgrc"), self.root)
45 except IOError:
45 except IOError:
46 pass
46 pass
47
47
48 v = self.ui.revlogopts
48 v = self.ui.revlogopts
49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
51 fl = v.get('flags', None)
51 fl = v.get('flags', None)
52 flags = 0
52 flags = 0
53 if fl != None:
53 if fl != None:
54 for x in fl.split():
54 for x in fl.split():
55 flags |= revlog.flagstr(x)
55 flags |= revlog.flagstr(x)
56 elif self.revlogv1:
56 elif self.revlogv1:
57 flags = revlog.REVLOG_DEFAULT_FLAGS
57 flags = revlog.REVLOG_DEFAULT_FLAGS
58
58
59 v = self.revlogversion | flags
59 v = self.revlogversion | flags
60 self.manifest = manifest.manifest(self.opener, v)
60 self.manifest = manifest.manifest(self.opener, v)
61 self.changelog = changelog.changelog(self.opener, v)
61 self.changelog = changelog.changelog(self.opener, v)
62
62
63 # the changelog might not have the inline index flag
63 # the changelog might not have the inline index flag
64 # on. If the format of the changelog is the same as found in
64 # on. If the format of the changelog is the same as found in
65 # .hgrc, apply any flags found in the .hgrc as well.
65 # .hgrc, apply any flags found in the .hgrc as well.
66 # Otherwise, just version from the changelog
66 # Otherwise, just version from the changelog
67 v = self.changelog.version
67 v = self.changelog.version
68 if v == self.revlogversion:
68 if v == self.revlogversion:
69 v |= flags
69 v |= flags
70 self.revlogversion = v
70 self.revlogversion = v
71
71
72 self.tagscache = None
72 self.tagscache = None
73 self.nodetagscache = None
73 self.nodetagscache = None
74 self.encodepats = None
74 self.encodepats = None
75 self.decodepats = None
75 self.decodepats = None
76 self.transhandle = None
76 self.transhandle = None
77
77
78 if create:
78 if create:
79 if not os.path.exists(path):
79 if not os.path.exists(path):
80 os.mkdir(path)
80 os.mkdir(path)
81 os.mkdir(self.path)
81 os.mkdir(self.path)
82 os.mkdir(self.join("data"))
82 os.mkdir(self.join("data"))
83
83
84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85
85
86 def url(self):
86 def url(self):
87 return 'file:' + self.root
87 return 'file:' + self.root
88
88
89 def hook(self, name, throw=False, **args):
89 def hook(self, name, throw=False, **args):
90 def callhook(hname, funcname):
90 def callhook(hname, funcname):
91 '''call python hook. hook is callable object, looked up as
91 '''call python hook. hook is callable object, looked up as
92 name in python module. if callable returns "true", hook
92 name in python module. if callable returns "true", hook
93 fails, else passes. if hook raises exception, treated as
93 fails, else passes. if hook raises exception, treated as
94 hook failure. exception propagates if throw is "true".
94 hook failure. exception propagates if throw is "true".
95
95
96 reason for "true" meaning "hook failed" is so that
96 reason for "true" meaning "hook failed" is so that
97 unmodified commands (e.g. mercurial.commands.update) can
97 unmodified commands (e.g. mercurial.commands.update) can
98 be run as hooks without wrappers to convert return values.'''
98 be run as hooks without wrappers to convert return values.'''
99
99
100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
101 d = funcname.rfind('.')
101 d = funcname.rfind('.')
102 if d == -1:
102 if d == -1:
103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
104 % (hname, funcname))
104 % (hname, funcname))
105 modname = funcname[:d]
105 modname = funcname[:d]
106 try:
106 try:
107 obj = __import__(modname)
107 obj = __import__(modname)
108 except ImportError:
108 except ImportError:
109 try:
109 try:
110 # extensions are loaded with hgext_ prefix
110 # extensions are loaded with hgext_ prefix
111 obj = __import__("hgext_%s" % modname)
111 obj = __import__("hgext_%s" % modname)
112 except ImportError:
112 except ImportError:
113 raise util.Abort(_('%s hook is invalid '
113 raise util.Abort(_('%s hook is invalid '
114 '(import of "%s" failed)') %
114 '(import of "%s" failed)') %
115 (hname, modname))
115 (hname, modname))
116 try:
116 try:
117 for p in funcname.split('.')[1:]:
117 for p in funcname.split('.')[1:]:
118 obj = getattr(obj, p)
118 obj = getattr(obj, p)
119 except AttributeError, err:
119 except AttributeError, err:
120 raise util.Abort(_('%s hook is invalid '
120 raise util.Abort(_('%s hook is invalid '
121 '("%s" is not defined)') %
121 '("%s" is not defined)') %
122 (hname, funcname))
122 (hname, funcname))
123 if not callable(obj):
123 if not callable(obj):
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not callable)') %
125 '("%s" is not callable)') %
126 (hname, funcname))
126 (hname, funcname))
127 try:
127 try:
128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
129 except (KeyboardInterrupt, util.SignalInterrupt):
129 except (KeyboardInterrupt, util.SignalInterrupt):
130 raise
130 raise
131 except Exception, exc:
131 except Exception, exc:
132 if isinstance(exc, util.Abort):
132 if isinstance(exc, util.Abort):
133 self.ui.warn(_('error: %s hook failed: %s\n') %
133 self.ui.warn(_('error: %s hook failed: %s\n') %
134 (hname, exc.args[0] % exc.args[1:]))
134 (hname, exc.args[0] % exc.args[1:]))
135 else:
135 else:
136 self.ui.warn(_('error: %s hook raised an exception: '
136 self.ui.warn(_('error: %s hook raised an exception: '
137 '%s\n') % (hname, exc))
137 '%s\n') % (hname, exc))
138 if throw:
138 if throw:
139 raise
139 raise
140 self.ui.print_exc()
140 self.ui.print_exc()
141 return True
141 return True
142 if r:
142 if r:
143 if throw:
143 if throw:
144 raise util.Abort(_('%s hook failed') % hname)
144 raise util.Abort(_('%s hook failed') % hname)
145 self.ui.warn(_('warning: %s hook failed\n') % hname)
145 self.ui.warn(_('warning: %s hook failed\n') % hname)
146 return r
146 return r
147
147
148 def runhook(name, cmd):
148 def runhook(name, cmd):
149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
151 r = util.system(cmd, environ=env, cwd=self.root)
151 r = util.system(cmd, environ=env, cwd=self.root)
152 if r:
152 if r:
153 desc, r = util.explain_exit(r)
153 desc, r = util.explain_exit(r)
154 if throw:
154 if throw:
155 raise util.Abort(_('%s hook %s') % (name, desc))
155 raise util.Abort(_('%s hook %s') % (name, desc))
156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
157 return r
157 return r
158
158
159 r = False
159 r = False
160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
161 if hname.split(".", 1)[0] == name and cmd]
161 if hname.split(".", 1)[0] == name and cmd]
162 hooks.sort()
162 hooks.sort()
163 for hname, cmd in hooks:
163 for hname, cmd in hooks:
164 if cmd.startswith('python:'):
164 if cmd.startswith('python:'):
165 r = callhook(hname, cmd[7:].strip()) or r
165 r = callhook(hname, cmd[7:].strip()) or r
166 else:
166 else:
167 r = runhook(hname, cmd) or r
167 r = runhook(hname, cmd) or r
168 return r
168 return r
169
169
170 tag_disallowed = ':\r\n'
170 tag_disallowed = ':\r\n'
171
171
172 def tag(self, name, node, local=False, message=None, user=None, date=None):
172 def tag(self, name, node, local=False, message=None, user=None, date=None):
173 '''tag a revision with a symbolic name.
173 '''tag a revision with a symbolic name.
174
174
175 if local is True, the tag is stored in a per-repository file.
175 if local is True, the tag is stored in a per-repository file.
176 otherwise, it is stored in the .hgtags file, and a new
176 otherwise, it is stored in the .hgtags file, and a new
177 changeset is committed with the change.
177 changeset is committed with the change.
178
178
179 keyword arguments:
179 keyword arguments:
180
180
181 local: whether to store tag in non-version-controlled file
181 local: whether to store tag in non-version-controlled file
182 (default False)
182 (default False)
183
183
184 message: commit message to use if committing
184 message: commit message to use if committing
185
185
186 user: name of user to use if committing
186 user: name of user to use if committing
187
187
188 date: date tuple to use if committing'''
188 date: date tuple to use if committing'''
189
189
190 for c in self.tag_disallowed:
190 for c in self.tag_disallowed:
191 if c in name:
191 if c in name:
192 raise util.Abort(_('%r cannot be used in a tag name') % c)
192 raise util.Abort(_('%r cannot be used in a tag name') % c)
193
193
194 self.hook('pretag', throw=True, node=node, tag=name, local=local)
194 self.hook('pretag', throw=True, node=node, tag=name, local=local)
195
195
196 if local:
196 if local:
197 self.opener('localtags', 'a').write('%s %s\n' % (node, name))
197 self.opener('localtags', 'a').write('%s %s\n' % (node, name))
198 self.hook('tag', node=node, tag=name, local=local)
198 self.hook('tag', node=node, tag=name, local=local)
199 return
199 return
200
200
201 for x in self.changes():
201 for x in self.changes():
202 if '.hgtags' in x:
202 if '.hgtags' in x:
203 raise util.Abort(_('working copy of .hgtags is changed '
203 raise util.Abort(_('working copy of .hgtags is changed '
204 '(please commit .hgtags manually)'))
204 '(please commit .hgtags manually)'))
205
205
206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
207 if self.dirstate.state('.hgtags') == '?':
207 if self.dirstate.state('.hgtags') == '?':
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 if not message:
210 if not message:
211 message = _('Added tag %s for changeset %s') % (name, node)
211 message = _('Added tag %s for changeset %s') % (name, node)
212
212
213 self.commit(['.hgtags'], message, user, date)
213 self.commit(['.hgtags'], message, user, date)
214 self.hook('tag', node=node, tag=name, local=local)
214 self.hook('tag', node=node, tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
291 def lookup(self, key):
291 def lookup(self, key):
292 try:
292 try:
293 return self.tags()[key]
293 return self.tags()[key]
294 except KeyError:
294 except KeyError:
295 if key == '.':
295 if key == '.':
296 key = self.dirstate.parents()[0]
296 key = self.dirstate.parents()[0]
297 if key == nullid:
297 if key == nullid:
298 raise repo.RepoError(_("no revision checked out"))
298 raise repo.RepoError(_("no revision checked out"))
299 try:
299 try:
300 return self.changelog.lookup(key)
300 return self.changelog.lookup(key)
301 except:
301 except:
302 raise repo.RepoError(_("unknown revision '%s'") % key)
302 raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
304 def dev(self):
304 def dev(self):
305 return os.lstat(self.path).st_dev
305 return os.lstat(self.path).st_dev
306
306
307 def local(self):
307 def local(self):
308 return True
308 return True
309
309
310 def join(self, f):
310 def join(self, f):
311 return os.path.join(self.path, f)
311 return os.path.join(self.path, f)
312
312
313 def wjoin(self, f):
313 def wjoin(self, f):
314 return os.path.join(self.root, f)
314 return os.path.join(self.root, f)
315
315
316 def file(self, f):
316 def file(self, f):
317 if f[0] == '/':
317 if f[0] == '/':
318 f = f[1:]
318 f = f[1:]
319 return filelog.filelog(self.opener, f, self.revlogversion)
319 return filelog.filelog(self.opener, f, self.revlogversion)
320
320
321 def changectx(self, changeid):
321 def changectx(self, changeid):
322 return context.changectx(self, changeid)
322 return context.changectx(self, changeid)
323
323
324 def filectx(self, path, changeid=None, fileid=None):
324 def filectx(self, path, changeid=None, fileid=None):
325 """changeid can be a changeset revision, node, or tag.
325 """changeid can be a changeset revision, node, or tag.
326 fileid can be a file revision or node."""
326 fileid can be a file revision or node."""
327 return context.filectx(self, path, changeid, fileid)
327 return context.filectx(self, path, changeid, fileid)
328
328
329 def getcwd(self):
329 def getcwd(self):
330 return self.dirstate.getcwd()
330 return self.dirstate.getcwd()
331
331
332 def wfile(self, f, mode='r'):
332 def wfile(self, f, mode='r'):
333 return self.wopener(f, mode)
333 return self.wopener(f, mode)
334
334
335 def wread(self, filename):
335 def wread(self, filename):
336 if self.encodepats == None:
336 if self.encodepats == None:
337 l = []
337 l = []
338 for pat, cmd in self.ui.configitems("encode"):
338 for pat, cmd in self.ui.configitems("encode"):
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
340 l.append((mf, cmd))
340 l.append((mf, cmd))
341 self.encodepats = l
341 self.encodepats = l
342
342
343 data = self.wopener(filename, 'r').read()
343 data = self.wopener(filename, 'r').read()
344
344
345 for mf, cmd in self.encodepats:
345 for mf, cmd in self.encodepats:
346 if mf(filename):
346 if mf(filename):
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
348 data = util.filter(data, cmd)
348 data = util.filter(data, cmd)
349 break
349 break
350
350
351 return data
351 return data
352
352
353 def wwrite(self, filename, data, fd=None):
353 def wwrite(self, filename, data, fd=None):
354 if self.decodepats == None:
354 if self.decodepats == None:
355 l = []
355 l = []
356 for pat, cmd in self.ui.configitems("decode"):
356 for pat, cmd in self.ui.configitems("decode"):
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
358 l.append((mf, cmd))
358 l.append((mf, cmd))
359 self.decodepats = l
359 self.decodepats = l
360
360
361 for mf, cmd in self.decodepats:
361 for mf, cmd in self.decodepats:
362 if mf(filename):
362 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
364 data = util.filter(data, cmd)
365 break
365 break
366
366
367 if fd:
367 if fd:
368 return fd.write(data)
368 return fd.write(data)
369 return self.wopener(filename, 'w').write(data)
369 return self.wopener(filename, 'w').write(data)
370
370
371 def transaction(self):
371 def transaction(self):
372 tr = self.transhandle
372 tr = self.transhandle
373 if tr != None and tr.running():
373 if tr != None and tr.running():
374 return tr.nest()
374 return tr.nest()
375
375
376 # save dirstate for rollback
376 # save dirstate for rollback
377 try:
377 try:
378 ds = self.opener("dirstate").read()
378 ds = self.opener("dirstate").read()
379 except IOError:
379 except IOError:
380 ds = ""
380 ds = ""
381 self.opener("journal.dirstate", "w").write(ds)
381 self.opener("journal.dirstate", "w").write(ds)
382
382
383 tr = transaction.transaction(self.ui.warn, self.opener,
383 tr = transaction.transaction(self.ui.warn, self.opener,
384 self.join("journal"),
384 self.join("journal"),
385 aftertrans(self.path))
385 aftertrans(self.path))
386 self.transhandle = tr
386 self.transhandle = tr
387 return tr
387 return tr
388
388
389 def recover(self):
389 def recover(self):
390 l = self.lock()
390 l = self.lock()
391 if os.path.exists(self.join("journal")):
391 if os.path.exists(self.join("journal")):
392 self.ui.status(_("rolling back interrupted transaction\n"))
392 self.ui.status(_("rolling back interrupted transaction\n"))
393 transaction.rollback(self.opener, self.join("journal"))
393 transaction.rollback(self.opener, self.join("journal"))
394 self.reload()
394 self.reload()
395 return True
395 return True
396 else:
396 else:
397 self.ui.warn(_("no interrupted transaction available\n"))
397 self.ui.warn(_("no interrupted transaction available\n"))
398 return False
398 return False
399
399
400 def rollback(self, wlock=None):
400 def rollback(self, wlock=None):
401 if not wlock:
401 if not wlock:
402 wlock = self.wlock()
402 wlock = self.wlock()
403 l = self.lock()
403 l = self.lock()
404 if os.path.exists(self.join("undo")):
404 if os.path.exists(self.join("undo")):
405 self.ui.status(_("rolling back last transaction\n"))
405 self.ui.status(_("rolling back last transaction\n"))
406 transaction.rollback(self.opener, self.join("undo"))
406 transaction.rollback(self.opener, self.join("undo"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
408 self.reload()
408 self.reload()
409 self.wreload()
409 self.wreload()
410 else:
410 else:
411 self.ui.warn(_("no rollback information available\n"))
411 self.ui.warn(_("no rollback information available\n"))
412
412
413 def wreload(self):
413 def wreload(self):
414 self.dirstate.read()
414 self.dirstate.read()
415
415
416 def reload(self):
416 def reload(self):
417 self.changelog.load()
417 self.changelog.load()
418 self.manifest.load()
418 self.manifest.load()
419 self.tagscache = None
419 self.tagscache = None
420 self.nodetagscache = None
420 self.nodetagscache = None
421
421
422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
423 desc=None):
423 desc=None):
424 try:
424 try:
425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
426 except lock.LockHeld, inst:
426 except lock.LockHeld, inst:
427 if not wait:
427 if not wait:
428 raise
428 raise
429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
430 (desc, inst.args[0]))
430 (desc, inst.args[0]))
431 # default to 600 seconds timeout
431 # default to 600 seconds timeout
432 l = lock.lock(self.join(lockname),
432 l = lock.lock(self.join(lockname),
433 int(self.ui.config("ui", "timeout") or 600),
433 int(self.ui.config("ui", "timeout") or 600),
434 releasefn, desc=desc)
434 releasefn, desc=desc)
435 if acquirefn:
435 if acquirefn:
436 acquirefn()
436 acquirefn()
437 return l
437 return l
438
438
439 def lock(self, wait=1):
439 def lock(self, wait=1):
440 return self.do_lock("lock", wait, acquirefn=self.reload,
440 return self.do_lock("lock", wait, acquirefn=self.reload,
441 desc=_('repository %s') % self.origroot)
441 desc=_('repository %s') % self.origroot)
442
442
443 def wlock(self, wait=1):
443 def wlock(self, wait=1):
444 return self.do_lock("wlock", wait, self.dirstate.write,
444 return self.do_lock("wlock", wait, self.dirstate.write,
445 self.wreload,
445 self.wreload,
446 desc=_('working directory of %s') % self.origroot)
446 desc=_('working directory of %s') % self.origroot)
447
447
448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
449 "determine whether a new filenode is needed"
449 "determine whether a new filenode is needed"
450 fp1 = manifest1.get(filename, nullid)
450 fp1 = manifest1.get(filename, nullid)
451 fp2 = manifest2.get(filename, nullid)
451 fp2 = manifest2.get(filename, nullid)
452
452
453 if fp2 != nullid:
453 if fp2 != nullid:
454 # is one parent an ancestor of the other?
454 # is one parent an ancestor of the other?
455 fpa = filelog.ancestor(fp1, fp2)
455 fpa = filelog.ancestor(fp1, fp2)
456 if fpa == fp1:
456 if fpa == fp1:
457 fp1, fp2 = fp2, nullid
457 fp1, fp2 = fp2, nullid
458 elif fpa == fp2:
458 elif fpa == fp2:
459 fp2 = nullid
459 fp2 = nullid
460
460
461 # is the file unmodified from the parent? report existing entry
461 # is the file unmodified from the parent? report existing entry
462 if fp2 == nullid and text == filelog.read(fp1):
462 if fp2 == nullid and text == filelog.read(fp1):
463 return (fp1, None, None)
463 return (fp1, None, None)
464
464
465 return (None, fp1, fp2)
465 return (None, fp1, fp2)
466
466
467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
468 orig_parent = self.dirstate.parents()[0] or nullid
468 orig_parent = self.dirstate.parents()[0] or nullid
469 p1 = p1 or self.dirstate.parents()[0] or nullid
469 p1 = p1 or self.dirstate.parents()[0] or nullid
470 p2 = p2 or self.dirstate.parents()[1] or nullid
470 p2 = p2 or self.dirstate.parents()[1] or nullid
471 c1 = self.changelog.read(p1)
471 c1 = self.changelog.read(p1)
472 c2 = self.changelog.read(p2)
472 c2 = self.changelog.read(p2)
473 m1 = self.manifest.read(c1[0]).copy()
473 m1 = self.manifest.read(c1[0]).copy()
474 m2 = self.manifest.read(c2[0])
474 m2 = self.manifest.read(c2[0])
475 changed = []
475 changed = []
476
476
477 if orig_parent == p1:
477 if orig_parent == p1:
478 update_dirstate = 1
478 update_dirstate = 1
479 else:
479 else:
480 update_dirstate = 0
480 update_dirstate = 0
481
481
482 if not wlock:
482 if not wlock:
483 wlock = self.wlock()
483 wlock = self.wlock()
484 l = self.lock()
484 l = self.lock()
485 tr = self.transaction()
485 tr = self.transaction()
486 linkrev = self.changelog.count()
486 linkrev = self.changelog.count()
487 for f in files:
487 for f in files:
488 try:
488 try:
489 t = self.wread(f)
489 t = self.wread(f)
490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
491 r = self.file(f)
491 r = self.file(f)
492
492
493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
494 if entry:
494 if entry:
495 m1[f] = entry
495 m1[f] = entry
496 continue
496 continue
497
497
498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
499 changed.append(f)
499 changed.append(f)
500 if update_dirstate:
500 if update_dirstate:
501 self.dirstate.update([f], "n")
501 self.dirstate.update([f], "n")
502 except IOError:
502 except IOError:
503 try:
503 try:
504 del m1[f]
504 del m1[f]
505 del m1[f]
506 if update_dirstate:
505 if update_dirstate:
507 self.dirstate.forget([f])
506 self.dirstate.forget([f])
508 except:
507 except:
509 # deleted from p2?
508 # deleted from p2?
510 pass
509 pass
511
510
512 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
511 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
513 user = user or self.ui.username()
512 user = user or self.ui.username()
514 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
513 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
515 tr.close()
514 tr.close()
516 if update_dirstate:
515 if update_dirstate:
517 self.dirstate.setparents(n, nullid)
516 self.dirstate.setparents(n, nullid)
518
517
519 def commit(self, files=None, text="", user=None, date=None,
518 def commit(self, files=None, text="", user=None, date=None,
520 match=util.always, force=False, lock=None, wlock=None,
519 match=util.always, force=False, lock=None, wlock=None,
521 force_editor=False):
520 force_editor=False):
522 commit = []
521 commit = []
523 remove = []
522 remove = []
524 changed = []
523 changed = []
525
524
526 if files:
525 if files:
527 for f in files:
526 for f in files:
528 s = self.dirstate.state(f)
527 s = self.dirstate.state(f)
529 if s in 'nmai':
528 if s in 'nmai':
530 commit.append(f)
529 commit.append(f)
531 elif s == 'r':
530 elif s == 'r':
532 remove.append(f)
531 remove.append(f)
533 else:
532 else:
534 self.ui.warn(_("%s not tracked!\n") % f)
533 self.ui.warn(_("%s not tracked!\n") % f)
535 else:
534 else:
536 modified, added, removed, deleted, unknown = self.changes(match=match)
535 modified, added, removed, deleted, unknown = self.changes(match=match)
537 commit = modified + added
536 commit = modified + added
538 remove = removed
537 remove = removed
539
538
540 p1, p2 = self.dirstate.parents()
539 p1, p2 = self.dirstate.parents()
541 c1 = self.changelog.read(p1)
540 c1 = self.changelog.read(p1)
542 c2 = self.changelog.read(p2)
541 c2 = self.changelog.read(p2)
543 m1 = self.manifest.read(c1[0]).copy()
542 m1 = self.manifest.read(c1[0]).copy()
544 m2 = self.manifest.read(c2[0])
543 m2 = self.manifest.read(c2[0])
545
544
546 if not commit and not remove and not force and p2 == nullid:
545 if not commit and not remove and not force and p2 == nullid:
547 self.ui.status(_("nothing changed\n"))
546 self.ui.status(_("nothing changed\n"))
548 return None
547 return None
549
548
550 xp1 = hex(p1)
549 xp1 = hex(p1)
551 if p2 == nullid: xp2 = ''
550 if p2 == nullid: xp2 = ''
552 else: xp2 = hex(p2)
551 else: xp2 = hex(p2)
553
552
554 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
553 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
555
554
556 if not wlock:
555 if not wlock:
557 wlock = self.wlock()
556 wlock = self.wlock()
558 if not lock:
557 if not lock:
559 lock = self.lock()
558 lock = self.lock()
560 tr = self.transaction()
559 tr = self.transaction()
561
560
562 # check in files
561 # check in files
563 new = {}
562 new = {}
564 linkrev = self.changelog.count()
563 linkrev = self.changelog.count()
565 commit.sort()
564 commit.sort()
566 for f in commit:
565 for f in commit:
567 self.ui.note(f + "\n")
566 self.ui.note(f + "\n")
568 try:
567 try:
569 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
570 t = self.wread(f)
569 t = self.wread(f)
571 except IOError:
570 except IOError:
572 self.ui.warn(_("trouble committing %s!\n") % f)
571 self.ui.warn(_("trouble committing %s!\n") % f)
573 raise
572 raise
574
573
575 r = self.file(f)
574 r = self.file(f)
576
575
577 meta = {}
576 meta = {}
578 cp = self.dirstate.copied(f)
577 cp = self.dirstate.copied(f)
579 if cp:
578 if cp:
580 meta["copy"] = cp
579 meta["copy"] = cp
581 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
580 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
582 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
581 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
583 fp1, fp2 = nullid, nullid
582 fp1, fp2 = nullid, nullid
584 else:
583 else:
585 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
584 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
586 if entry:
585 if entry:
587 new[f] = entry
586 new[f] = entry
588 continue
587 continue
589
588
590 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
589 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
591 # remember what we've added so that we can later calculate
590 # remember what we've added so that we can later calculate
592 # the files to pull from a set of changesets
591 # the files to pull from a set of changesets
593 changed.append(f)
592 changed.append(f)
594
593
595 # update manifest
594 # update manifest
596 m1.update(new)
595 m1.update(new)
597 for f in remove:
596 for f in remove:
598 if f in m1:
597 if f in m1:
599 del m1[f]
598 del m1[f]
600 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
599 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
601 (new, remove))
600 (new, remove))
602
601
603 # add changeset
602 # add changeset
604 new = new.keys()
603 new = new.keys()
605 new.sort()
604 new.sort()
606
605
607 user = user or self.ui.username()
606 user = user or self.ui.username()
608 if not text or force_editor:
607 if not text or force_editor:
609 edittext = []
608 edittext = []
610 if text:
609 if text:
611 edittext.append(text)
610 edittext.append(text)
612 edittext.append("")
611 edittext.append("")
613 if p2 != nullid:
612 if p2 != nullid:
614 edittext.append("HG: branch merge")
613 edittext.append("HG: branch merge")
615 edittext.extend(["HG: changed %s" % f for f in changed])
614 edittext.extend(["HG: changed %s" % f for f in changed])
616 edittext.extend(["HG: removed %s" % f for f in remove])
615 edittext.extend(["HG: removed %s" % f for f in remove])
617 if not changed and not remove:
616 if not changed and not remove:
618 edittext.append("HG: no files changed")
617 edittext.append("HG: no files changed")
619 edittext.append("")
618 edittext.append("")
620 # run editor in the repository root
619 # run editor in the repository root
621 olddir = os.getcwd()
620 olddir = os.getcwd()
622 os.chdir(self.root)
621 os.chdir(self.root)
623 text = self.ui.edit("\n".join(edittext), user)
622 text = self.ui.edit("\n".join(edittext), user)
624 os.chdir(olddir)
623 os.chdir(olddir)
625
624
626 lines = [line.rstrip() for line in text.rstrip().splitlines()]
625 lines = [line.rstrip() for line in text.rstrip().splitlines()]
627 while lines and not lines[0]:
626 while lines and not lines[0]:
628 del lines[0]
627 del lines[0]
629 if not lines:
628 if not lines:
630 return None
629 return None
631 text = '\n'.join(lines)
630 text = '\n'.join(lines)
632 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
631 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
633 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
634 parent2=xp2)
633 parent2=xp2)
635 tr.close()
634 tr.close()
636
635
637 self.dirstate.setparents(n)
636 self.dirstate.setparents(n)
638 self.dirstate.update(new, "n")
637 self.dirstate.update(new, "n")
639 self.dirstate.forget(remove)
638 self.dirstate.forget(remove)
640
639
641 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
640 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
642 return n
641 return n
643
642
644 def walk(self, node=None, files=[], match=util.always, badmatch=None):
643 def walk(self, node=None, files=[], match=util.always, badmatch=None):
645 if node:
644 if node:
646 fdict = dict.fromkeys(files)
645 fdict = dict.fromkeys(files)
647 for fn in self.manifest.read(self.changelog.read(node)[0]):
646 for fn in self.manifest.read(self.changelog.read(node)[0]):
648 fdict.pop(fn, None)
647 fdict.pop(fn, None)
649 if match(fn):
648 if match(fn):
650 yield 'm', fn
649 yield 'm', fn
651 for fn in fdict:
650 for fn in fdict:
652 if badmatch and badmatch(fn):
651 if badmatch and badmatch(fn):
653 if match(fn):
652 if match(fn):
654 yield 'b', fn
653 yield 'b', fn
655 else:
654 else:
656 self.ui.warn(_('%s: No such file in rev %s\n') % (
655 self.ui.warn(_('%s: No such file in rev %s\n') % (
657 util.pathto(self.getcwd(), fn), short(node)))
656 util.pathto(self.getcwd(), fn), short(node)))
658 else:
657 else:
659 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
658 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
660 yield src, fn
659 yield src, fn
661
660
662 def status(self, node1=None, node2=None, files=[], match=util.always,
661 def status(self, node1=None, node2=None, files=[], match=util.always,
663 wlock=None, list_ignored=False, list_clean=False):
662 wlock=None, list_ignored=False, list_clean=False):
664 """return status of files between two nodes or node and working directory
663 """return status of files between two nodes or node and working directory
665
664
666 If node1 is None, use the first dirstate parent instead.
665 If node1 is None, use the first dirstate parent instead.
667 If node2 is None, compare node1 with working directory.
666 If node2 is None, compare node1 with working directory.
668 """
667 """
669
668
670 def fcmp(fn, mf):
669 def fcmp(fn, mf):
671 t1 = self.wread(fn)
670 t1 = self.wread(fn)
672 t2 = self.file(fn).read(mf.get(fn, nullid))
671 t2 = self.file(fn).read(mf.get(fn, nullid))
673 return cmp(t1, t2)
672 return cmp(t1, t2)
674
673
675 def mfmatches(node):
674 def mfmatches(node):
676 change = self.changelog.read(node)
675 change = self.changelog.read(node)
677 mf = dict(self.manifest.read(change[0]))
676 mf = dict(self.manifest.read(change[0]))
678 for fn in mf.keys():
677 for fn in mf.keys():
679 if not match(fn):
678 if not match(fn):
680 del mf[fn]
679 del mf[fn]
681 return mf
680 return mf
682
681
683 modified, added, removed, deleted, unknown = [], [], [], [], []
682 modified, added, removed, deleted, unknown = [], [], [], [], []
684 ignored, clean = [], []
683 ignored, clean = [], []
685
684
686 compareworking = False
685 compareworking = False
687 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
686 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
688 compareworking = True
687 compareworking = True
689
688
690 if not compareworking:
689 if not compareworking:
691 # read the manifest from node1 before the manifest from node2,
690 # read the manifest from node1 before the manifest from node2,
692 # so that we'll hit the manifest cache if we're going through
691 # so that we'll hit the manifest cache if we're going through
693 # all the revisions in parent->child order.
692 # all the revisions in parent->child order.
694 mf1 = mfmatches(node1)
693 mf1 = mfmatches(node1)
695
694
696 # are we comparing the working directory?
695 # are we comparing the working directory?
697 if not node2:
696 if not node2:
698 if not wlock:
697 if not wlock:
699 try:
698 try:
700 wlock = self.wlock(wait=0)
699 wlock = self.wlock(wait=0)
701 except lock.LockException:
700 except lock.LockException:
702 wlock = None
701 wlock = None
703 (lookup, modified, added, removed, deleted, unknown,
702 (lookup, modified, added, removed, deleted, unknown,
704 ignored, clean) = self.dirstate.status(files, match,
703 ignored, clean) = self.dirstate.status(files, match,
705 list_ignored, list_clean)
704 list_ignored, list_clean)
706
705
707 # are we comparing working dir against its parent?
706 # are we comparing working dir against its parent?
708 if compareworking:
707 if compareworking:
709 if lookup:
708 if lookup:
710 # do a full compare of any files that might have changed
709 # do a full compare of any files that might have changed
711 mf2 = mfmatches(self.dirstate.parents()[0])
710 mf2 = mfmatches(self.dirstate.parents()[0])
712 for f in lookup:
711 for f in lookup:
713 if fcmp(f, mf2):
712 if fcmp(f, mf2):
714 modified.append(f)
713 modified.append(f)
715 elif wlock is not None:
714 elif wlock is not None:
716 self.dirstate.update([f], "n")
715 self.dirstate.update([f], "n")
717 else:
716 else:
718 # we are comparing working dir against non-parent
717 # we are comparing working dir against non-parent
719 # generate a pseudo-manifest for the working dir
718 # generate a pseudo-manifest for the working dir
720 mf2 = mfmatches(self.dirstate.parents()[0])
719 mf2 = mfmatches(self.dirstate.parents()[0])
721 for f in lookup + modified + added:
720 for f in lookup + modified + added:
722 mf2[f] = ""
721 mf2[f] = ""
723 for f in removed:
722 for f in removed:
724 if f in mf2:
723 if f in mf2:
725 del mf2[f]
724 del mf2[f]
726 else:
725 else:
727 # we are comparing two revisions
726 # we are comparing two revisions
728 mf2 = mfmatches(node2)
727 mf2 = mfmatches(node2)
729
728
730 if not compareworking:
729 if not compareworking:
731 # flush lists from dirstate before comparing manifests
730 # flush lists from dirstate before comparing manifests
732 modified, added, clean = [], [], []
731 modified, added, clean = [], [], []
733
732
734 # make sure to sort the files so we talk to the disk in a
733 # make sure to sort the files so we talk to the disk in a
735 # reasonable order
734 # reasonable order
736 mf2keys = mf2.keys()
735 mf2keys = mf2.keys()
737 mf2keys.sort()
736 mf2keys.sort()
738 for fn in mf2keys:
737 for fn in mf2keys:
739 if mf1.has_key(fn):
738 if mf1.has_key(fn):
740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
739 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
741 modified.append(fn)
740 modified.append(fn)
742 elif list_clean:
741 elif list_clean:
743 clean.append(fn)
742 clean.append(fn)
744 del mf1[fn]
743 del mf1[fn]
745 else:
744 else:
746 added.append(fn)
745 added.append(fn)
747
746
748 removed = mf1.keys()
747 removed = mf1.keys()
749
748
750 # sort and return results:
749 # sort and return results:
751 for l in modified, added, removed, deleted, unknown, ignored, clean:
750 for l in modified, added, removed, deleted, unknown, ignored, clean:
752 l.sort()
751 l.sort()
753 return (modified, added, removed, deleted, unknown, ignored, clean)
752 return (modified, added, removed, deleted, unknown, ignored, clean)
754
753
755 def changes(self, node1=None, node2=None, files=[], match=util.always,
754 def changes(self, node1=None, node2=None, files=[], match=util.always,
756 wlock=None, list_ignored=False, list_clean=False):
755 wlock=None, list_ignored=False, list_clean=False):
757 '''DEPRECATED - use status instead'''
756 '''DEPRECATED - use status instead'''
758 marduit = self.status(node1, node2, files, match, wlock,
757 marduit = self.status(node1, node2, files, match, wlock,
759 list_ignored, list_clean)
758 list_ignored, list_clean)
760 if list_ignored:
759 if list_ignored:
761 return marduit[:-1]
760 return marduit[:-1]
762 else:
761 else:
763 return marduit[:-2]
762 return marduit[:-2]
764
763
765 def add(self, list, wlock=None):
764 def add(self, list, wlock=None):
766 if not wlock:
765 if not wlock:
767 wlock = self.wlock()
766 wlock = self.wlock()
768 for f in list:
767 for f in list:
769 p = self.wjoin(f)
768 p = self.wjoin(f)
770 if not os.path.exists(p):
769 if not os.path.exists(p):
771 self.ui.warn(_("%s does not exist!\n") % f)
770 self.ui.warn(_("%s does not exist!\n") % f)
772 elif not os.path.isfile(p):
771 elif not os.path.isfile(p):
773 self.ui.warn(_("%s not added: only files supported currently\n")
772 self.ui.warn(_("%s not added: only files supported currently\n")
774 % f)
773 % f)
775 elif self.dirstate.state(f) in 'an':
774 elif self.dirstate.state(f) in 'an':
776 self.ui.warn(_("%s already tracked!\n") % f)
775 self.ui.warn(_("%s already tracked!\n") % f)
777 else:
776 else:
778 self.dirstate.update([f], "a")
777 self.dirstate.update([f], "a")
779
778
780 def forget(self, list, wlock=None):
779 def forget(self, list, wlock=None):
781 if not wlock:
780 if not wlock:
782 wlock = self.wlock()
781 wlock = self.wlock()
783 for f in list:
782 for f in list:
784 if self.dirstate.state(f) not in 'ai':
783 if self.dirstate.state(f) not in 'ai':
785 self.ui.warn(_("%s not added!\n") % f)
784 self.ui.warn(_("%s not added!\n") % f)
786 else:
785 else:
787 self.dirstate.forget([f])
786 self.dirstate.forget([f])
788
787
789 def remove(self, list, unlink=False, wlock=None):
788 def remove(self, list, unlink=False, wlock=None):
790 if unlink:
789 if unlink:
791 for f in list:
790 for f in list:
792 try:
791 try:
793 util.unlink(self.wjoin(f))
792 util.unlink(self.wjoin(f))
794 except OSError, inst:
793 except OSError, inst:
795 if inst.errno != errno.ENOENT:
794 if inst.errno != errno.ENOENT:
796 raise
795 raise
797 if not wlock:
796 if not wlock:
798 wlock = self.wlock()
797 wlock = self.wlock()
799 for f in list:
798 for f in list:
800 p = self.wjoin(f)
799 p = self.wjoin(f)
801 if os.path.exists(p):
800 if os.path.exists(p):
802 self.ui.warn(_("%s still exists!\n") % f)
801 self.ui.warn(_("%s still exists!\n") % f)
803 elif self.dirstate.state(f) == 'a':
802 elif self.dirstate.state(f) == 'a':
804 self.dirstate.forget([f])
803 self.dirstate.forget([f])
805 elif f not in self.dirstate:
804 elif f not in self.dirstate:
806 self.ui.warn(_("%s not tracked!\n") % f)
805 self.ui.warn(_("%s not tracked!\n") % f)
807 else:
806 else:
808 self.dirstate.update([f], "r")
807 self.dirstate.update([f], "r")
809
808
810 def undelete(self, list, wlock=None):
809 def undelete(self, list, wlock=None):
811 p = self.dirstate.parents()[0]
810 p = self.dirstate.parents()[0]
812 mn = self.changelog.read(p)[0]
811 mn = self.changelog.read(p)[0]
813 m = self.manifest.read(mn)
812 m = self.manifest.read(mn)
814 if not wlock:
813 if not wlock:
815 wlock = self.wlock()
814 wlock = self.wlock()
816 for f in list:
815 for f in list:
817 if self.dirstate.state(f) not in "r":
816 if self.dirstate.state(f) not in "r":
818 self.ui.warn("%s not removed!\n" % f)
817 self.ui.warn("%s not removed!\n" % f)
819 else:
818 else:
820 t = self.file(f).read(m[f])
819 t = self.file(f).read(m[f])
821 self.wwrite(f, t)
820 self.wwrite(f, t)
822 util.set_exec(self.wjoin(f), m.execf(f))
821 util.set_exec(self.wjoin(f), m.execf(f))
823 self.dirstate.update([f], "n")
822 self.dirstate.update([f], "n")
824
823
825 def copy(self, source, dest, wlock=None):
824 def copy(self, source, dest, wlock=None):
826 p = self.wjoin(dest)
825 p = self.wjoin(dest)
827 if not os.path.exists(p):
826 if not os.path.exists(p):
828 self.ui.warn(_("%s does not exist!\n") % dest)
827 self.ui.warn(_("%s does not exist!\n") % dest)
829 elif not os.path.isfile(p):
828 elif not os.path.isfile(p):
830 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
829 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
831 else:
830 else:
832 if not wlock:
831 if not wlock:
833 wlock = self.wlock()
832 wlock = self.wlock()
834 if self.dirstate.state(dest) == '?':
833 if self.dirstate.state(dest) == '?':
835 self.dirstate.update([dest], "a")
834 self.dirstate.update([dest], "a")
836 self.dirstate.copy(source, dest)
835 self.dirstate.copy(source, dest)
837
836
838 def heads(self, start=None):
837 def heads(self, start=None):
839 heads = self.changelog.heads(start)
838 heads = self.changelog.heads(start)
840 # sort the output in rev descending order
839 # sort the output in rev descending order
841 heads = [(-self.changelog.rev(h), h) for h in heads]
840 heads = [(-self.changelog.rev(h), h) for h in heads]
842 heads.sort()
841 heads.sort()
843 return [n for (r, n) in heads]
842 return [n for (r, n) in heads]
844
843
845 # branchlookup returns a dict giving a list of branches for
844 # branchlookup returns a dict giving a list of branches for
846 # each head. A branch is defined as the tag of a node or
845 # each head. A branch is defined as the tag of a node or
847 # the branch of the node's parents. If a node has multiple
846 # the branch of the node's parents. If a node has multiple
848 # branch tags, tags are eliminated if they are visible from other
847 # branch tags, tags are eliminated if they are visible from other
849 # branch tags.
848 # branch tags.
850 #
849 #
851 # So, for this graph: a->b->c->d->e
850 # So, for this graph: a->b->c->d->e
852 # \ /
851 # \ /
853 # aa -----/
852 # aa -----/
854 # a has tag 2.6.12
853 # a has tag 2.6.12
855 # d has tag 2.6.13
854 # d has tag 2.6.13
856 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
855 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
857 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
856 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
858 # from the list.
857 # from the list.
859 #
858 #
860 # It is possible that more than one head will have the same branch tag.
859 # It is possible that more than one head will have the same branch tag.
861 # callers need to check the result for multiple heads under the same
860 # callers need to check the result for multiple heads under the same
862 # branch tag if that is a problem for them (ie checkout of a specific
861 # branch tag if that is a problem for them (ie checkout of a specific
863 # branch).
862 # branch).
864 #
863 #
865 # passing in a specific branch will limit the depth of the search
864 # passing in a specific branch will limit the depth of the search
866 # through the parents. It won't limit the branches returned in the
865 # through the parents. It won't limit the branches returned in the
867 # result though.
866 # result though.
868 def branchlookup(self, heads=None, branch=None):
867 def branchlookup(self, heads=None, branch=None):
869 if not heads:
868 if not heads:
870 heads = self.heads()
869 heads = self.heads()
871 headt = [ h for h in heads ]
870 headt = [ h for h in heads ]
872 chlog = self.changelog
871 chlog = self.changelog
873 branches = {}
872 branches = {}
874 merges = []
873 merges = []
875 seenmerge = {}
874 seenmerge = {}
876
875
877 # traverse the tree once for each head, recording in the branches
876 # traverse the tree once for each head, recording in the branches
878 # dict which tags are visible from this head. The branches
877 # dict which tags are visible from this head. The branches
879 # dict also records which tags are visible from each tag
878 # dict also records which tags are visible from each tag
880 # while we traverse.
879 # while we traverse.
881 while headt or merges:
880 while headt or merges:
882 if merges:
881 if merges:
883 n, found = merges.pop()
882 n, found = merges.pop()
884 visit = [n]
883 visit = [n]
885 else:
884 else:
886 h = headt.pop()
885 h = headt.pop()
887 visit = [h]
886 visit = [h]
888 found = [h]
887 found = [h]
889 seen = {}
888 seen = {}
890 while visit:
889 while visit:
891 n = visit.pop()
890 n = visit.pop()
892 if n in seen:
891 if n in seen:
893 continue
892 continue
894 pp = chlog.parents(n)
893 pp = chlog.parents(n)
895 tags = self.nodetags(n)
894 tags = self.nodetags(n)
896 if tags:
895 if tags:
897 for x in tags:
896 for x in tags:
898 if x == 'tip':
897 if x == 'tip':
899 continue
898 continue
900 for f in found:
899 for f in found:
901 branches.setdefault(f, {})[n] = 1
900 branches.setdefault(f, {})[n] = 1
902 branches.setdefault(n, {})[n] = 1
901 branches.setdefault(n, {})[n] = 1
903 break
902 break
904 if n not in found:
903 if n not in found:
905 found.append(n)
904 found.append(n)
906 if branch in tags:
905 if branch in tags:
907 continue
906 continue
908 seen[n] = 1
907 seen[n] = 1
909 if pp[1] != nullid and n not in seenmerge:
908 if pp[1] != nullid and n not in seenmerge:
910 merges.append((pp[1], [x for x in found]))
909 merges.append((pp[1], [x for x in found]))
911 seenmerge[n] = 1
910 seenmerge[n] = 1
912 if pp[0] != nullid:
911 if pp[0] != nullid:
913 visit.append(pp[0])
912 visit.append(pp[0])
914 # traverse the branches dict, eliminating branch tags from each
913 # traverse the branches dict, eliminating branch tags from each
915 # head that are visible from another branch tag for that head.
914 # head that are visible from another branch tag for that head.
916 out = {}
915 out = {}
917 viscache = {}
916 viscache = {}
918 for h in heads:
917 for h in heads:
919 def visible(node):
918 def visible(node):
920 if node in viscache:
919 if node in viscache:
921 return viscache[node]
920 return viscache[node]
922 ret = {}
921 ret = {}
923 visit = [node]
922 visit = [node]
924 while visit:
923 while visit:
925 x = visit.pop()
924 x = visit.pop()
926 if x in viscache:
925 if x in viscache:
927 ret.update(viscache[x])
926 ret.update(viscache[x])
928 elif x not in ret:
927 elif x not in ret:
929 ret[x] = 1
928 ret[x] = 1
930 if x in branches:
929 if x in branches:
931 visit[len(visit):] = branches[x].keys()
930 visit[len(visit):] = branches[x].keys()
932 viscache[node] = ret
931 viscache[node] = ret
933 return ret
932 return ret
934 if h not in branches:
933 if h not in branches:
935 continue
934 continue
936 # O(n^2), but somewhat limited. This only searches the
935 # O(n^2), but somewhat limited. This only searches the
937 # tags visible from a specific head, not all the tags in the
936 # tags visible from a specific head, not all the tags in the
938 # whole repo.
937 # whole repo.
939 for b in branches[h]:
938 for b in branches[h]:
940 vis = False
939 vis = False
941 for bb in branches[h].keys():
940 for bb in branches[h].keys():
942 if b != bb:
941 if b != bb:
943 if b in visible(bb):
942 if b in visible(bb):
944 vis = True
943 vis = True
945 break
944 break
946 if not vis:
945 if not vis:
947 l = out.setdefault(h, [])
946 l = out.setdefault(h, [])
948 l[len(l):] = self.nodetags(b)
947 l[len(l):] = self.nodetags(b)
949 return out
948 return out
950
949
951 def branches(self, nodes):
950 def branches(self, nodes):
952 if not nodes:
951 if not nodes:
953 nodes = [self.changelog.tip()]
952 nodes = [self.changelog.tip()]
954 b = []
953 b = []
955 for n in nodes:
954 for n in nodes:
956 t = n
955 t = n
957 while 1:
956 while 1:
958 p = self.changelog.parents(n)
957 p = self.changelog.parents(n)
959 if p[1] != nullid or p[0] == nullid:
958 if p[1] != nullid or p[0] == nullid:
960 b.append((t, n, p[0], p[1]))
959 b.append((t, n, p[0], p[1]))
961 break
960 break
962 n = p[0]
961 n = p[0]
963 return b
962 return b
964
963
965 def between(self, pairs):
964 def between(self, pairs):
966 r = []
965 r = []
967
966
968 for top, bottom in pairs:
967 for top, bottom in pairs:
969 n, l, i = top, [], 0
968 n, l, i = top, [], 0
970 f = 1
969 f = 1
971
970
972 while n != bottom:
971 while n != bottom:
973 p = self.changelog.parents(n)[0]
972 p = self.changelog.parents(n)[0]
974 if i == f:
973 if i == f:
975 l.append(n)
974 l.append(n)
976 f = f * 2
975 f = f * 2
977 n = p
976 n = p
978 i += 1
977 i += 1
979
978
980 r.append(l)
979 r.append(l)
981
980
982 return r
981 return r
983
982
984 def findincoming(self, remote, base=None, heads=None, force=False):
983 def findincoming(self, remote, base=None, heads=None, force=False):
985 """Return list of roots of the subsets of missing nodes from remote
984 """Return list of roots of the subsets of missing nodes from remote
986
985
987 If base dict is specified, assume that these nodes and their parents
986 If base dict is specified, assume that these nodes and their parents
988 exist on the remote side and that no child of a node of base exists
987 exist on the remote side and that no child of a node of base exists
989 in both remote and self.
988 in both remote and self.
990 Furthermore base will be updated to include the nodes that exists
989 Furthermore base will be updated to include the nodes that exists
991 in self and remote but no children exists in self and remote.
990 in self and remote but no children exists in self and remote.
992 If a list of heads is specified, return only nodes which are heads
991 If a list of heads is specified, return only nodes which are heads
993 or ancestors of these heads.
992 or ancestors of these heads.
994
993
995 All the ancestors of base are in self and in remote.
994 All the ancestors of base are in self and in remote.
996 All the descendants of the list returned are missing in self.
995 All the descendants of the list returned are missing in self.
997 (and so we know that the rest of the nodes are missing in remote, see
996 (and so we know that the rest of the nodes are missing in remote, see
998 outgoing)
997 outgoing)
999 """
998 """
1000 m = self.changelog.nodemap
999 m = self.changelog.nodemap
1001 search = []
1000 search = []
1002 fetch = {}
1001 fetch = {}
1003 seen = {}
1002 seen = {}
1004 seenbranch = {}
1003 seenbranch = {}
1005 if base == None:
1004 if base == None:
1006 base = {}
1005 base = {}
1007
1006
1008 if not heads:
1007 if not heads:
1009 heads = remote.heads()
1008 heads = remote.heads()
1010
1009
1011 if self.changelog.tip() == nullid:
1010 if self.changelog.tip() == nullid:
1012 base[nullid] = 1
1011 base[nullid] = 1
1013 if heads != [nullid]:
1012 if heads != [nullid]:
1014 return [nullid]
1013 return [nullid]
1015 return []
1014 return []
1016
1015
1017 # assume we're closer to the tip than the root
1016 # assume we're closer to the tip than the root
1018 # and start by examining the heads
1017 # and start by examining the heads
1019 self.ui.status(_("searching for changes\n"))
1018 self.ui.status(_("searching for changes\n"))
1020
1019
1021 unknown = []
1020 unknown = []
1022 for h in heads:
1021 for h in heads:
1023 if h not in m:
1022 if h not in m:
1024 unknown.append(h)
1023 unknown.append(h)
1025 else:
1024 else:
1026 base[h] = 1
1025 base[h] = 1
1027
1026
1028 if not unknown:
1027 if not unknown:
1029 return []
1028 return []
1030
1029
1031 req = dict.fromkeys(unknown)
1030 req = dict.fromkeys(unknown)
1032 reqcnt = 0
1031 reqcnt = 0
1033
1032
1034 # search through remote branches
1033 # search through remote branches
1035 # a 'branch' here is a linear segment of history, with four parts:
1034 # a 'branch' here is a linear segment of history, with four parts:
1036 # head, root, first parent, second parent
1035 # head, root, first parent, second parent
1037 # (a branch always has two parents (or none) by definition)
1036 # (a branch always has two parents (or none) by definition)
1038 unknown = remote.branches(unknown)
1037 unknown = remote.branches(unknown)
1039 while unknown:
1038 while unknown:
1040 r = []
1039 r = []
1041 while unknown:
1040 while unknown:
1042 n = unknown.pop(0)
1041 n = unknown.pop(0)
1043 if n[0] in seen:
1042 if n[0] in seen:
1044 continue
1043 continue
1045
1044
1046 self.ui.debug(_("examining %s:%s\n")
1045 self.ui.debug(_("examining %s:%s\n")
1047 % (short(n[0]), short(n[1])))
1046 % (short(n[0]), short(n[1])))
1048 if n[0] == nullid: # found the end of the branch
1047 if n[0] == nullid: # found the end of the branch
1049 pass
1048 pass
1050 elif n in seenbranch:
1049 elif n in seenbranch:
1051 self.ui.debug(_("branch already found\n"))
1050 self.ui.debug(_("branch already found\n"))
1052 continue
1051 continue
1053 elif n[1] and n[1] in m: # do we know the base?
1052 elif n[1] and n[1] in m: # do we know the base?
1054 self.ui.debug(_("found incomplete branch %s:%s\n")
1053 self.ui.debug(_("found incomplete branch %s:%s\n")
1055 % (short(n[0]), short(n[1])))
1054 % (short(n[0]), short(n[1])))
1056 search.append(n) # schedule branch range for scanning
1055 search.append(n) # schedule branch range for scanning
1057 seenbranch[n] = 1
1056 seenbranch[n] = 1
1058 else:
1057 else:
1059 if n[1] not in seen and n[1] not in fetch:
1058 if n[1] not in seen and n[1] not in fetch:
1060 if n[2] in m and n[3] in m:
1059 if n[2] in m and n[3] in m:
1061 self.ui.debug(_("found new changeset %s\n") %
1060 self.ui.debug(_("found new changeset %s\n") %
1062 short(n[1]))
1061 short(n[1]))
1063 fetch[n[1]] = 1 # earliest unknown
1062 fetch[n[1]] = 1 # earliest unknown
1064 for p in n[2:4]:
1063 for p in n[2:4]:
1065 if p in m:
1064 if p in m:
1066 base[p] = 1 # latest known
1065 base[p] = 1 # latest known
1067
1066
1068 for p in n[2:4]:
1067 for p in n[2:4]:
1069 if p not in req and p not in m:
1068 if p not in req and p not in m:
1070 r.append(p)
1069 r.append(p)
1071 req[p] = 1
1070 req[p] = 1
1072 seen[n[0]] = 1
1071 seen[n[0]] = 1
1073
1072
1074 if r:
1073 if r:
1075 reqcnt += 1
1074 reqcnt += 1
1076 self.ui.debug(_("request %d: %s\n") %
1075 self.ui.debug(_("request %d: %s\n") %
1077 (reqcnt, " ".join(map(short, r))))
1076 (reqcnt, " ".join(map(short, r))))
1078 for p in range(0, len(r), 10):
1077 for p in range(0, len(r), 10):
1079 for b in remote.branches(r[p:p+10]):
1078 for b in remote.branches(r[p:p+10]):
1080 self.ui.debug(_("received %s:%s\n") %
1079 self.ui.debug(_("received %s:%s\n") %
1081 (short(b[0]), short(b[1])))
1080 (short(b[0]), short(b[1])))
1082 unknown.append(b)
1081 unknown.append(b)
1083
1082
1084 # do binary search on the branches we found
1083 # do binary search on the branches we found
1085 while search:
1084 while search:
1086 n = search.pop(0)
1085 n = search.pop(0)
1087 reqcnt += 1
1086 reqcnt += 1
1088 l = remote.between([(n[0], n[1])])[0]
1087 l = remote.between([(n[0], n[1])])[0]
1089 l.append(n[1])
1088 l.append(n[1])
1090 p = n[0]
1089 p = n[0]
1091 f = 1
1090 f = 1
1092 for i in l:
1091 for i in l:
1093 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1092 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1094 if i in m:
1093 if i in m:
1095 if f <= 2:
1094 if f <= 2:
1096 self.ui.debug(_("found new branch changeset %s\n") %
1095 self.ui.debug(_("found new branch changeset %s\n") %
1097 short(p))
1096 short(p))
1098 fetch[p] = 1
1097 fetch[p] = 1
1099 base[i] = 1
1098 base[i] = 1
1100 else:
1099 else:
1101 self.ui.debug(_("narrowed branch search to %s:%s\n")
1100 self.ui.debug(_("narrowed branch search to %s:%s\n")
1102 % (short(p), short(i)))
1101 % (short(p), short(i)))
1103 search.append((p, i))
1102 search.append((p, i))
1104 break
1103 break
1105 p, f = i, f * 2
1104 p, f = i, f * 2
1106
1105
1107 # sanity check our fetch list
1106 # sanity check our fetch list
1108 for f in fetch.keys():
1107 for f in fetch.keys():
1109 if f in m:
1108 if f in m:
1110 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1109 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1111
1110
1112 if base.keys() == [nullid]:
1111 if base.keys() == [nullid]:
1113 if force:
1112 if force:
1114 self.ui.warn(_("warning: repository is unrelated\n"))
1113 self.ui.warn(_("warning: repository is unrelated\n"))
1115 else:
1114 else:
1116 raise util.Abort(_("repository is unrelated"))
1115 raise util.Abort(_("repository is unrelated"))
1117
1116
1118 self.ui.note(_("found new changesets starting at ") +
1117 self.ui.note(_("found new changesets starting at ") +
1119 " ".join([short(f) for f in fetch]) + "\n")
1118 " ".join([short(f) for f in fetch]) + "\n")
1120
1119
1121 self.ui.debug(_("%d total queries\n") % reqcnt)
1120 self.ui.debug(_("%d total queries\n") % reqcnt)
1122
1121
1123 return fetch.keys()
1122 return fetch.keys()
1124
1123
1125 def findoutgoing(self, remote, base=None, heads=None, force=False):
1124 def findoutgoing(self, remote, base=None, heads=None, force=False):
1126 """Return list of nodes that are roots of subsets not in remote
1125 """Return list of nodes that are roots of subsets not in remote
1127
1126
1128 If base dict is specified, assume that these nodes and their parents
1127 If base dict is specified, assume that these nodes and their parents
1129 exist on the remote side.
1128 exist on the remote side.
1130 If a list of heads is specified, return only nodes which are heads
1129 If a list of heads is specified, return only nodes which are heads
1131 or ancestors of these heads, and return a second element which
1130 or ancestors of these heads, and return a second element which
1132 contains all remote heads which get new children.
1131 contains all remote heads which get new children.
1133 """
1132 """
1134 if base == None:
1133 if base == None:
1135 base = {}
1134 base = {}
1136 self.findincoming(remote, base, heads, force=force)
1135 self.findincoming(remote, base, heads, force=force)
1137
1136
1138 self.ui.debug(_("common changesets up to ")
1137 self.ui.debug(_("common changesets up to ")
1139 + " ".join(map(short, base.keys())) + "\n")
1138 + " ".join(map(short, base.keys())) + "\n")
1140
1139
1141 remain = dict.fromkeys(self.changelog.nodemap)
1140 remain = dict.fromkeys(self.changelog.nodemap)
1142
1141
1143 # prune everything remote has from the tree
1142 # prune everything remote has from the tree
1144 del remain[nullid]
1143 del remain[nullid]
1145 remove = base.keys()
1144 remove = base.keys()
1146 while remove:
1145 while remove:
1147 n = remove.pop(0)
1146 n = remove.pop(0)
1148 if n in remain:
1147 if n in remain:
1149 del remain[n]
1148 del remain[n]
1150 for p in self.changelog.parents(n):
1149 for p in self.changelog.parents(n):
1151 remove.append(p)
1150 remove.append(p)
1152
1151
1153 # find every node whose parents have been pruned
1152 # find every node whose parents have been pruned
1154 subset = []
1153 subset = []
1155 # find every remote head that will get new children
1154 # find every remote head that will get new children
1156 updated_heads = {}
1155 updated_heads = {}
1157 for n in remain:
1156 for n in remain:
1158 p1, p2 = self.changelog.parents(n)
1157 p1, p2 = self.changelog.parents(n)
1159 if p1 not in remain and p2 not in remain:
1158 if p1 not in remain and p2 not in remain:
1160 subset.append(n)
1159 subset.append(n)
1161 if heads:
1160 if heads:
1162 if p1 in heads:
1161 if p1 in heads:
1163 updated_heads[p1] = True
1162 updated_heads[p1] = True
1164 if p2 in heads:
1163 if p2 in heads:
1165 updated_heads[p2] = True
1164 updated_heads[p2] = True
1166
1165
1167 # this is the set of all roots we have to push
1166 # this is the set of all roots we have to push
1168 if heads:
1167 if heads:
1169 return subset, updated_heads.keys()
1168 return subset, updated_heads.keys()
1170 else:
1169 else:
1171 return subset
1170 return subset
1172
1171
1173 def pull(self, remote, heads=None, force=False, lock=None):
1172 def pull(self, remote, heads=None, force=False, lock=None):
1174 mylock = False
1173 mylock = False
1175 if not lock:
1174 if not lock:
1176 lock = self.lock()
1175 lock = self.lock()
1177 mylock = True
1176 mylock = True
1178
1177
1179 try:
1178 try:
1180 fetch = self.findincoming(remote, force=force)
1179 fetch = self.findincoming(remote, force=force)
1181 if fetch == [nullid]:
1180 if fetch == [nullid]:
1182 self.ui.status(_("requesting all changes\n"))
1181 self.ui.status(_("requesting all changes\n"))
1183
1182
1184 if not fetch:
1183 if not fetch:
1185 self.ui.status(_("no changes found\n"))
1184 self.ui.status(_("no changes found\n"))
1186 return 0
1185 return 0
1187
1186
1188 if heads is None:
1187 if heads is None:
1189 cg = remote.changegroup(fetch, 'pull')
1188 cg = remote.changegroup(fetch, 'pull')
1190 else:
1189 else:
1191 cg = remote.changegroupsubset(fetch, heads, 'pull')
1190 cg = remote.changegroupsubset(fetch, heads, 'pull')
1192 return self.addchangegroup(cg, 'pull', remote.url())
1191 return self.addchangegroup(cg, 'pull', remote.url())
1193 finally:
1192 finally:
1194 if mylock:
1193 if mylock:
1195 lock.release()
1194 lock.release()
1196
1195
1197 def push(self, remote, force=False, revs=None):
1196 def push(self, remote, force=False, revs=None):
1198 # there are two ways to push to remote repo:
1197 # there are two ways to push to remote repo:
1199 #
1198 #
1200 # addchangegroup assumes local user can lock remote
1199 # addchangegroup assumes local user can lock remote
1201 # repo (local filesystem, old ssh servers).
1200 # repo (local filesystem, old ssh servers).
1202 #
1201 #
1203 # unbundle assumes local user cannot lock remote repo (new ssh
1202 # unbundle assumes local user cannot lock remote repo (new ssh
1204 # servers, http servers).
1203 # servers, http servers).
1205
1204
1206 if remote.capable('unbundle'):
1205 if remote.capable('unbundle'):
1207 return self.push_unbundle(remote, force, revs)
1206 return self.push_unbundle(remote, force, revs)
1208 return self.push_addchangegroup(remote, force, revs)
1207 return self.push_addchangegroup(remote, force, revs)
1209
1208
1210 def prepush(self, remote, force, revs):
1209 def prepush(self, remote, force, revs):
1211 base = {}
1210 base = {}
1212 remote_heads = remote.heads()
1211 remote_heads = remote.heads()
1213 inc = self.findincoming(remote, base, remote_heads, force=force)
1212 inc = self.findincoming(remote, base, remote_heads, force=force)
1214 if not force and inc:
1213 if not force and inc:
1215 self.ui.warn(_("abort: unsynced remote changes!\n"))
1214 self.ui.warn(_("abort: unsynced remote changes!\n"))
1216 self.ui.status(_("(did you forget to sync?"
1215 self.ui.status(_("(did you forget to sync?"
1217 " use push -f to force)\n"))
1216 " use push -f to force)\n"))
1218 return None, 1
1217 return None, 1
1219
1218
1220 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1219 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1221 if revs is not None:
1220 if revs is not None:
1222 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1221 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1223 else:
1222 else:
1224 bases, heads = update, self.changelog.heads()
1223 bases, heads = update, self.changelog.heads()
1225
1224
1226 if not bases:
1225 if not bases:
1227 self.ui.status(_("no changes found\n"))
1226 self.ui.status(_("no changes found\n"))
1228 return None, 1
1227 return None, 1
1229 elif not force:
1228 elif not force:
1230 # FIXME we don't properly detect creation of new heads
1229 # FIXME we don't properly detect creation of new heads
1231 # in the push -r case, assume the user knows what he's doing
1230 # in the push -r case, assume the user knows what he's doing
1232 if not revs and len(remote_heads) < len(heads) \
1231 if not revs and len(remote_heads) < len(heads) \
1233 and remote_heads != [nullid]:
1232 and remote_heads != [nullid]:
1234 self.ui.warn(_("abort: push creates new remote branches!\n"))
1233 self.ui.warn(_("abort: push creates new remote branches!\n"))
1235 self.ui.status(_("(did you forget to merge?"
1234 self.ui.status(_("(did you forget to merge?"
1236 " use push -f to force)\n"))
1235 " use push -f to force)\n"))
1237 return None, 1
1236 return None, 1
1238
1237
1239 if revs is None:
1238 if revs is None:
1240 cg = self.changegroup(update, 'push')
1239 cg = self.changegroup(update, 'push')
1241 else:
1240 else:
1242 cg = self.changegroupsubset(update, revs, 'push')
1241 cg = self.changegroupsubset(update, revs, 'push')
1243 return cg, remote_heads
1242 return cg, remote_heads
1244
1243
1245 def push_addchangegroup(self, remote, force, revs):
1244 def push_addchangegroup(self, remote, force, revs):
1246 lock = remote.lock()
1245 lock = remote.lock()
1247
1246
1248 ret = self.prepush(remote, force, revs)
1247 ret = self.prepush(remote, force, revs)
1249 if ret[0] is not None:
1248 if ret[0] is not None:
1250 cg, remote_heads = ret
1249 cg, remote_heads = ret
1251 return remote.addchangegroup(cg, 'push', self.url())
1250 return remote.addchangegroup(cg, 'push', self.url())
1252 return ret[1]
1251 return ret[1]
1253
1252
1254 def push_unbundle(self, remote, force, revs):
1253 def push_unbundle(self, remote, force, revs):
1255 # local repo finds heads on server, finds out what revs it
1254 # local repo finds heads on server, finds out what revs it
1256 # must push. once revs transferred, if server finds it has
1255 # must push. once revs transferred, if server finds it has
1257 # different heads (someone else won commit/push race), server
1256 # different heads (someone else won commit/push race), server
1258 # aborts.
1257 # aborts.
1259
1258
1260 ret = self.prepush(remote, force, revs)
1259 ret = self.prepush(remote, force, revs)
1261 if ret[0] is not None:
1260 if ret[0] is not None:
1262 cg, remote_heads = ret
1261 cg, remote_heads = ret
1263 if force: remote_heads = ['force']
1262 if force: remote_heads = ['force']
1264 return remote.unbundle(cg, remote_heads, 'push')
1263 return remote.unbundle(cg, remote_heads, 'push')
1265 return ret[1]
1264 return ret[1]
1266
1265
1267 def changegroupsubset(self, bases, heads, source):
1266 def changegroupsubset(self, bases, heads, source):
1268 """This function generates a changegroup consisting of all the nodes
1267 """This function generates a changegroup consisting of all the nodes
1269 that are descendents of any of the bases, and ancestors of any of
1268 that are descendents of any of the bases, and ancestors of any of
1270 the heads.
1269 the heads.
1271
1270
1272 It is fairly complex as determining which filenodes and which
1271 It is fairly complex as determining which filenodes and which
1273 manifest nodes need to be included for the changeset to be complete
1272 manifest nodes need to be included for the changeset to be complete
1274 is non-trivial.
1273 is non-trivial.
1275
1274
1276 Another wrinkle is doing the reverse, figuring out which changeset in
1275 Another wrinkle is doing the reverse, figuring out which changeset in
1277 the changegroup a particular filenode or manifestnode belongs to."""
1276 the changegroup a particular filenode or manifestnode belongs to."""
1278
1277
1279 self.hook('preoutgoing', throw=True, source=source)
1278 self.hook('preoutgoing', throw=True, source=source)
1280
1279
1281 # Set up some initial variables
1280 # Set up some initial variables
1282 # Make it easy to refer to self.changelog
1281 # Make it easy to refer to self.changelog
1283 cl = self.changelog
1282 cl = self.changelog
1284 # msng is short for missing - compute the list of changesets in this
1283 # msng is short for missing - compute the list of changesets in this
1285 # changegroup.
1284 # changegroup.
1286 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1285 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1287 # Some bases may turn out to be superfluous, and some heads may be
1286 # Some bases may turn out to be superfluous, and some heads may be
1288 # too. nodesbetween will return the minimal set of bases and heads
1287 # too. nodesbetween will return the minimal set of bases and heads
1289 # necessary to re-create the changegroup.
1288 # necessary to re-create the changegroup.
1290
1289
1291 # Known heads are the list of heads that it is assumed the recipient
1290 # Known heads are the list of heads that it is assumed the recipient
1292 # of this changegroup will know about.
1291 # of this changegroup will know about.
1293 knownheads = {}
1292 knownheads = {}
1294 # We assume that all parents of bases are known heads.
1293 # We assume that all parents of bases are known heads.
1295 for n in bases:
1294 for n in bases:
1296 for p in cl.parents(n):
1295 for p in cl.parents(n):
1297 if p != nullid:
1296 if p != nullid:
1298 knownheads[p] = 1
1297 knownheads[p] = 1
1299 knownheads = knownheads.keys()
1298 knownheads = knownheads.keys()
1300 if knownheads:
1299 if knownheads:
1301 # Now that we know what heads are known, we can compute which
1300 # Now that we know what heads are known, we can compute which
1302 # changesets are known. The recipient must know about all
1301 # changesets are known. The recipient must know about all
1303 # changesets required to reach the known heads from the null
1302 # changesets required to reach the known heads from the null
1304 # changeset.
1303 # changeset.
1305 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1304 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1306 junk = None
1305 junk = None
1307 # Transform the list into an ersatz set.
1306 # Transform the list into an ersatz set.
1308 has_cl_set = dict.fromkeys(has_cl_set)
1307 has_cl_set = dict.fromkeys(has_cl_set)
1309 else:
1308 else:
1310 # If there were no known heads, the recipient cannot be assumed to
1309 # If there were no known heads, the recipient cannot be assumed to
1311 # know about any changesets.
1310 # know about any changesets.
1312 has_cl_set = {}
1311 has_cl_set = {}
1313
1312
1314 # Make it easy to refer to self.manifest
1313 # Make it easy to refer to self.manifest
1315 mnfst = self.manifest
1314 mnfst = self.manifest
1316 # We don't know which manifests are missing yet
1315 # We don't know which manifests are missing yet
1317 msng_mnfst_set = {}
1316 msng_mnfst_set = {}
1318 # Nor do we know which filenodes are missing.
1317 # Nor do we know which filenodes are missing.
1319 msng_filenode_set = {}
1318 msng_filenode_set = {}
1320
1319
1321 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1320 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1322 junk = None
1321 junk = None
1323
1322
1324 # A changeset always belongs to itself, so the changenode lookup
1323 # A changeset always belongs to itself, so the changenode lookup
1325 # function for a changenode is identity.
1324 # function for a changenode is identity.
1326 def identity(x):
1325 def identity(x):
1327 return x
1326 return x
1328
1327
1329 # A function generating function. Sets up an environment for the
1328 # A function generating function. Sets up an environment for the
1330 # inner function.
1329 # inner function.
1331 def cmp_by_rev_func(revlog):
1330 def cmp_by_rev_func(revlog):
1332 # Compare two nodes by their revision number in the environment's
1331 # Compare two nodes by their revision number in the environment's
1333 # revision history. Since the revision number both represents the
1332 # revision history. Since the revision number both represents the
1334 # most efficient order to read the nodes in, and represents a
1333 # most efficient order to read the nodes in, and represents a
1335 # topological sorting of the nodes, this function is often useful.
1334 # topological sorting of the nodes, this function is often useful.
1336 def cmp_by_rev(a, b):
1335 def cmp_by_rev(a, b):
1337 return cmp(revlog.rev(a), revlog.rev(b))
1336 return cmp(revlog.rev(a), revlog.rev(b))
1338 return cmp_by_rev
1337 return cmp_by_rev
1339
1338
1340 # If we determine that a particular file or manifest node must be a
1339 # If we determine that a particular file or manifest node must be a
1341 # node that the recipient of the changegroup will already have, we can
1340 # node that the recipient of the changegroup will already have, we can
1342 # also assume the recipient will have all the parents. This function
1341 # also assume the recipient will have all the parents. This function
1343 # prunes them from the set of missing nodes.
1342 # prunes them from the set of missing nodes.
1344 def prune_parents(revlog, hasset, msngset):
1343 def prune_parents(revlog, hasset, msngset):
1345 haslst = hasset.keys()
1344 haslst = hasset.keys()
1346 haslst.sort(cmp_by_rev_func(revlog))
1345 haslst.sort(cmp_by_rev_func(revlog))
1347 for node in haslst:
1346 for node in haslst:
1348 parentlst = [p for p in revlog.parents(node) if p != nullid]
1347 parentlst = [p for p in revlog.parents(node) if p != nullid]
1349 while parentlst:
1348 while parentlst:
1350 n = parentlst.pop()
1349 n = parentlst.pop()
1351 if n not in hasset:
1350 if n not in hasset:
1352 hasset[n] = 1
1351 hasset[n] = 1
1353 p = [p for p in revlog.parents(n) if p != nullid]
1352 p = [p for p in revlog.parents(n) if p != nullid]
1354 parentlst.extend(p)
1353 parentlst.extend(p)
1355 for n in hasset:
1354 for n in hasset:
1356 msngset.pop(n, None)
1355 msngset.pop(n, None)
1357
1356
1358 # This is a function generating function used to set up an environment
1357 # This is a function generating function used to set up an environment
1359 # for the inner function to execute in.
1358 # for the inner function to execute in.
1360 def manifest_and_file_collector(changedfileset):
1359 def manifest_and_file_collector(changedfileset):
1361 # This is an information gathering function that gathers
1360 # This is an information gathering function that gathers
1362 # information from each changeset node that goes out as part of
1361 # information from each changeset node that goes out as part of
1363 # the changegroup. The information gathered is a list of which
1362 # the changegroup. The information gathered is a list of which
1364 # manifest nodes are potentially required (the recipient may
1363 # manifest nodes are potentially required (the recipient may
1365 # already have them) and total list of all files which were
1364 # already have them) and total list of all files which were
1366 # changed in any changeset in the changegroup.
1365 # changed in any changeset in the changegroup.
1367 #
1366 #
1368 # We also remember the first changenode we saw any manifest
1367 # We also remember the first changenode we saw any manifest
1369 # referenced by so we can later determine which changenode 'owns'
1368 # referenced by so we can later determine which changenode 'owns'
1370 # the manifest.
1369 # the manifest.
1371 def collect_manifests_and_files(clnode):
1370 def collect_manifests_and_files(clnode):
1372 c = cl.read(clnode)
1371 c = cl.read(clnode)
1373 for f in c[3]:
1372 for f in c[3]:
1374 # This is to make sure we only have one instance of each
1373 # This is to make sure we only have one instance of each
1375 # filename string for each filename.
1374 # filename string for each filename.
1376 changedfileset.setdefault(f, f)
1375 changedfileset.setdefault(f, f)
1377 msng_mnfst_set.setdefault(c[0], clnode)
1376 msng_mnfst_set.setdefault(c[0], clnode)
1378 return collect_manifests_and_files
1377 return collect_manifests_and_files
1379
1378
1380 # Figure out which manifest nodes (of the ones we think might be part
1379 # Figure out which manifest nodes (of the ones we think might be part
1381 # of the changegroup) the recipient must know about and remove them
1380 # of the changegroup) the recipient must know about and remove them
1382 # from the changegroup.
1381 # from the changegroup.
1383 def prune_manifests():
1382 def prune_manifests():
1384 has_mnfst_set = {}
1383 has_mnfst_set = {}
1385 for n in msng_mnfst_set:
1384 for n in msng_mnfst_set:
1386 # If a 'missing' manifest thinks it belongs to a changenode
1385 # If a 'missing' manifest thinks it belongs to a changenode
1387 # the recipient is assumed to have, obviously the recipient
1386 # the recipient is assumed to have, obviously the recipient
1388 # must have that manifest.
1387 # must have that manifest.
1389 linknode = cl.node(mnfst.linkrev(n))
1388 linknode = cl.node(mnfst.linkrev(n))
1390 if linknode in has_cl_set:
1389 if linknode in has_cl_set:
1391 has_mnfst_set[n] = 1
1390 has_mnfst_set[n] = 1
1392 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1391 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1393
1392
1394 # Use the information collected in collect_manifests_and_files to say
1393 # Use the information collected in collect_manifests_and_files to say
1395 # which changenode any manifestnode belongs to.
1394 # which changenode any manifestnode belongs to.
1396 def lookup_manifest_link(mnfstnode):
1395 def lookup_manifest_link(mnfstnode):
1397 return msng_mnfst_set[mnfstnode]
1396 return msng_mnfst_set[mnfstnode]
1398
1397
1399 # A function generating function that sets up the initial environment
1398 # A function generating function that sets up the initial environment
1400 # the inner function.
1399 # the inner function.
1401 def filenode_collector(changedfiles):
1400 def filenode_collector(changedfiles):
1402 next_rev = [0]
1401 next_rev = [0]
1403 # This gathers information from each manifestnode included in the
1402 # This gathers information from each manifestnode included in the
1404 # changegroup about which filenodes the manifest node references
1403 # changegroup about which filenodes the manifest node references
1405 # so we can include those in the changegroup too.
1404 # so we can include those in the changegroup too.
1406 #
1405 #
1407 # It also remembers which changenode each filenode belongs to. It
1406 # It also remembers which changenode each filenode belongs to. It
1408 # does this by assuming the a filenode belongs to the changenode
1407 # does this by assuming the a filenode belongs to the changenode
1409 # the first manifest that references it belongs to.
1408 # the first manifest that references it belongs to.
1410 def collect_msng_filenodes(mnfstnode):
1409 def collect_msng_filenodes(mnfstnode):
1411 r = mnfst.rev(mnfstnode)
1410 r = mnfst.rev(mnfstnode)
1412 if r == next_rev[0]:
1411 if r == next_rev[0]:
1413 # If the last rev we looked at was the one just previous,
1412 # If the last rev we looked at was the one just previous,
1414 # we only need to see a diff.
1413 # we only need to see a diff.
1415 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1414 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1416 # For each line in the delta
1415 # For each line in the delta
1417 for dline in delta.splitlines():
1416 for dline in delta.splitlines():
1418 # get the filename and filenode for that line
1417 # get the filename and filenode for that line
1419 f, fnode = dline.split('\0')
1418 f, fnode = dline.split('\0')
1420 fnode = bin(fnode[:40])
1419 fnode = bin(fnode[:40])
1421 f = changedfiles.get(f, None)
1420 f = changedfiles.get(f, None)
1422 # And if the file is in the list of files we care
1421 # And if the file is in the list of files we care
1423 # about.
1422 # about.
1424 if f is not None:
1423 if f is not None:
1425 # Get the changenode this manifest belongs to
1424 # Get the changenode this manifest belongs to
1426 clnode = msng_mnfst_set[mnfstnode]
1425 clnode = msng_mnfst_set[mnfstnode]
1427 # Create the set of filenodes for the file if
1426 # Create the set of filenodes for the file if
1428 # there isn't one already.
1427 # there isn't one already.
1429 ndset = msng_filenode_set.setdefault(f, {})
1428 ndset = msng_filenode_set.setdefault(f, {})
1430 # And set the filenode's changelog node to the
1429 # And set the filenode's changelog node to the
1431 # manifest's if it hasn't been set already.
1430 # manifest's if it hasn't been set already.
1432 ndset.setdefault(fnode, clnode)
1431 ndset.setdefault(fnode, clnode)
1433 else:
1432 else:
1434 # Otherwise we need a full manifest.
1433 # Otherwise we need a full manifest.
1435 m = mnfst.read(mnfstnode)
1434 m = mnfst.read(mnfstnode)
1436 # For every file in we care about.
1435 # For every file in we care about.
1437 for f in changedfiles:
1436 for f in changedfiles:
1438 fnode = m.get(f, None)
1437 fnode = m.get(f, None)
1439 # If it's in the manifest
1438 # If it's in the manifest
1440 if fnode is not None:
1439 if fnode is not None:
1441 # See comments above.
1440 # See comments above.
1442 clnode = msng_mnfst_set[mnfstnode]
1441 clnode = msng_mnfst_set[mnfstnode]
1443 ndset = msng_filenode_set.setdefault(f, {})
1442 ndset = msng_filenode_set.setdefault(f, {})
1444 ndset.setdefault(fnode, clnode)
1443 ndset.setdefault(fnode, clnode)
1445 # Remember the revision we hope to see next.
1444 # Remember the revision we hope to see next.
1446 next_rev[0] = r + 1
1445 next_rev[0] = r + 1
1447 return collect_msng_filenodes
1446 return collect_msng_filenodes
1448
1447
1449 # We have a list of filenodes we think we need for a file, lets remove
1448 # We have a list of filenodes we think we need for a file, lets remove
1450 # all those we now the recipient must have.
1449 # all those we now the recipient must have.
1451 def prune_filenodes(f, filerevlog):
1450 def prune_filenodes(f, filerevlog):
1452 msngset = msng_filenode_set[f]
1451 msngset = msng_filenode_set[f]
1453 hasset = {}
1452 hasset = {}
1454 # If a 'missing' filenode thinks it belongs to a changenode we
1453 # If a 'missing' filenode thinks it belongs to a changenode we
1455 # assume the recipient must have, then the recipient must have
1454 # assume the recipient must have, then the recipient must have
1456 # that filenode.
1455 # that filenode.
1457 for n in msngset:
1456 for n in msngset:
1458 clnode = cl.node(filerevlog.linkrev(n))
1457 clnode = cl.node(filerevlog.linkrev(n))
1459 if clnode in has_cl_set:
1458 if clnode in has_cl_set:
1460 hasset[n] = 1
1459 hasset[n] = 1
1461 prune_parents(filerevlog, hasset, msngset)
1460 prune_parents(filerevlog, hasset, msngset)
1462
1461
1463 # A function generator function that sets up the a context for the
1462 # A function generator function that sets up the a context for the
1464 # inner function.
1463 # inner function.
1465 def lookup_filenode_link_func(fname):
1464 def lookup_filenode_link_func(fname):
1466 msngset = msng_filenode_set[fname]
1465 msngset = msng_filenode_set[fname]
1467 # Lookup the changenode the filenode belongs to.
1466 # Lookup the changenode the filenode belongs to.
1468 def lookup_filenode_link(fnode):
1467 def lookup_filenode_link(fnode):
1469 return msngset[fnode]
1468 return msngset[fnode]
1470 return lookup_filenode_link
1469 return lookup_filenode_link
1471
1470
1472 # Now that we have all theses utility functions to help out and
1471 # Now that we have all theses utility functions to help out and
1473 # logically divide up the task, generate the group.
1472 # logically divide up the task, generate the group.
1474 def gengroup():
1473 def gengroup():
1475 # The set of changed files starts empty.
1474 # The set of changed files starts empty.
1476 changedfiles = {}
1475 changedfiles = {}
1477 # Create a changenode group generator that will call our functions
1476 # Create a changenode group generator that will call our functions
1478 # back to lookup the owning changenode and collect information.
1477 # back to lookup the owning changenode and collect information.
1479 group = cl.group(msng_cl_lst, identity,
1478 group = cl.group(msng_cl_lst, identity,
1480 manifest_and_file_collector(changedfiles))
1479 manifest_and_file_collector(changedfiles))
1481 for chnk in group:
1480 for chnk in group:
1482 yield chnk
1481 yield chnk
1483
1482
1484 # The list of manifests has been collected by the generator
1483 # The list of manifests has been collected by the generator
1485 # calling our functions back.
1484 # calling our functions back.
1486 prune_manifests()
1485 prune_manifests()
1487 msng_mnfst_lst = msng_mnfst_set.keys()
1486 msng_mnfst_lst = msng_mnfst_set.keys()
1488 # Sort the manifestnodes by revision number.
1487 # Sort the manifestnodes by revision number.
1489 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1488 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1490 # Create a generator for the manifestnodes that calls our lookup
1489 # Create a generator for the manifestnodes that calls our lookup
1491 # and data collection functions back.
1490 # and data collection functions back.
1492 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1491 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1493 filenode_collector(changedfiles))
1492 filenode_collector(changedfiles))
1494 for chnk in group:
1493 for chnk in group:
1495 yield chnk
1494 yield chnk
1496
1495
1497 # These are no longer needed, dereference and toss the memory for
1496 # These are no longer needed, dereference and toss the memory for
1498 # them.
1497 # them.
1499 msng_mnfst_lst = None
1498 msng_mnfst_lst = None
1500 msng_mnfst_set.clear()
1499 msng_mnfst_set.clear()
1501
1500
1502 changedfiles = changedfiles.keys()
1501 changedfiles = changedfiles.keys()
1503 changedfiles.sort()
1502 changedfiles.sort()
1504 # Go through all our files in order sorted by name.
1503 # Go through all our files in order sorted by name.
1505 for fname in changedfiles:
1504 for fname in changedfiles:
1506 filerevlog = self.file(fname)
1505 filerevlog = self.file(fname)
1507 # Toss out the filenodes that the recipient isn't really
1506 # Toss out the filenodes that the recipient isn't really
1508 # missing.
1507 # missing.
1509 if msng_filenode_set.has_key(fname):
1508 if msng_filenode_set.has_key(fname):
1510 prune_filenodes(fname, filerevlog)
1509 prune_filenodes(fname, filerevlog)
1511 msng_filenode_lst = msng_filenode_set[fname].keys()
1510 msng_filenode_lst = msng_filenode_set[fname].keys()
1512 else:
1511 else:
1513 msng_filenode_lst = []
1512 msng_filenode_lst = []
1514 # If any filenodes are left, generate the group for them,
1513 # If any filenodes are left, generate the group for them,
1515 # otherwise don't bother.
1514 # otherwise don't bother.
1516 if len(msng_filenode_lst) > 0:
1515 if len(msng_filenode_lst) > 0:
1517 yield changegroup.genchunk(fname)
1516 yield changegroup.genchunk(fname)
1518 # Sort the filenodes by their revision #
1517 # Sort the filenodes by their revision #
1519 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1518 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1520 # Create a group generator and only pass in a changenode
1519 # Create a group generator and only pass in a changenode
1521 # lookup function as we need to collect no information
1520 # lookup function as we need to collect no information
1522 # from filenodes.
1521 # from filenodes.
1523 group = filerevlog.group(msng_filenode_lst,
1522 group = filerevlog.group(msng_filenode_lst,
1524 lookup_filenode_link_func(fname))
1523 lookup_filenode_link_func(fname))
1525 for chnk in group:
1524 for chnk in group:
1526 yield chnk
1525 yield chnk
1527 if msng_filenode_set.has_key(fname):
1526 if msng_filenode_set.has_key(fname):
1528 # Don't need this anymore, toss it to free memory.
1527 # Don't need this anymore, toss it to free memory.
1529 del msng_filenode_set[fname]
1528 del msng_filenode_set[fname]
1530 # Signal that no more groups are left.
1529 # Signal that no more groups are left.
1531 yield changegroup.closechunk()
1530 yield changegroup.closechunk()
1532
1531
1533 if msng_cl_lst:
1532 if msng_cl_lst:
1534 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1533 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1535
1534
1536 return util.chunkbuffer(gengroup())
1535 return util.chunkbuffer(gengroup())
1537
1536
1538 def changegroup(self, basenodes, source):
1537 def changegroup(self, basenodes, source):
1539 """Generate a changegroup of all nodes that we have that a recipient
1538 """Generate a changegroup of all nodes that we have that a recipient
1540 doesn't.
1539 doesn't.
1541
1540
1542 This is much easier than the previous function as we can assume that
1541 This is much easier than the previous function as we can assume that
1543 the recipient has any changenode we aren't sending them."""
1542 the recipient has any changenode we aren't sending them."""
1544
1543
1545 self.hook('preoutgoing', throw=True, source=source)
1544 self.hook('preoutgoing', throw=True, source=source)
1546
1545
1547 cl = self.changelog
1546 cl = self.changelog
1548 nodes = cl.nodesbetween(basenodes, None)[0]
1547 nodes = cl.nodesbetween(basenodes, None)[0]
1549 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1548 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1550
1549
1551 def identity(x):
1550 def identity(x):
1552 return x
1551 return x
1553
1552
1554 def gennodelst(revlog):
1553 def gennodelst(revlog):
1555 for r in xrange(0, revlog.count()):
1554 for r in xrange(0, revlog.count()):
1556 n = revlog.node(r)
1555 n = revlog.node(r)
1557 if revlog.linkrev(n) in revset:
1556 if revlog.linkrev(n) in revset:
1558 yield n
1557 yield n
1559
1558
1560 def changed_file_collector(changedfileset):
1559 def changed_file_collector(changedfileset):
1561 def collect_changed_files(clnode):
1560 def collect_changed_files(clnode):
1562 c = cl.read(clnode)
1561 c = cl.read(clnode)
1563 for fname in c[3]:
1562 for fname in c[3]:
1564 changedfileset[fname] = 1
1563 changedfileset[fname] = 1
1565 return collect_changed_files
1564 return collect_changed_files
1566
1565
1567 def lookuprevlink_func(revlog):
1566 def lookuprevlink_func(revlog):
1568 def lookuprevlink(n):
1567 def lookuprevlink(n):
1569 return cl.node(revlog.linkrev(n))
1568 return cl.node(revlog.linkrev(n))
1570 return lookuprevlink
1569 return lookuprevlink
1571
1570
1572 def gengroup():
1571 def gengroup():
1573 # construct a list of all changed files
1572 # construct a list of all changed files
1574 changedfiles = {}
1573 changedfiles = {}
1575
1574
1576 for chnk in cl.group(nodes, identity,
1575 for chnk in cl.group(nodes, identity,
1577 changed_file_collector(changedfiles)):
1576 changed_file_collector(changedfiles)):
1578 yield chnk
1577 yield chnk
1579 changedfiles = changedfiles.keys()
1578 changedfiles = changedfiles.keys()
1580 changedfiles.sort()
1579 changedfiles.sort()
1581
1580
1582 mnfst = self.manifest
1581 mnfst = self.manifest
1583 nodeiter = gennodelst(mnfst)
1582 nodeiter = gennodelst(mnfst)
1584 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1583 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1585 yield chnk
1584 yield chnk
1586
1585
1587 for fname in changedfiles:
1586 for fname in changedfiles:
1588 filerevlog = self.file(fname)
1587 filerevlog = self.file(fname)
1589 nodeiter = gennodelst(filerevlog)
1588 nodeiter = gennodelst(filerevlog)
1590 nodeiter = list(nodeiter)
1589 nodeiter = list(nodeiter)
1591 if nodeiter:
1590 if nodeiter:
1592 yield changegroup.genchunk(fname)
1591 yield changegroup.genchunk(fname)
1593 lookup = lookuprevlink_func(filerevlog)
1592 lookup = lookuprevlink_func(filerevlog)
1594 for chnk in filerevlog.group(nodeiter, lookup):
1593 for chnk in filerevlog.group(nodeiter, lookup):
1595 yield chnk
1594 yield chnk
1596
1595
1597 yield changegroup.closechunk()
1596 yield changegroup.closechunk()
1598
1597
1599 if nodes:
1598 if nodes:
1600 self.hook('outgoing', node=hex(nodes[0]), source=source)
1599 self.hook('outgoing', node=hex(nodes[0]), source=source)
1601
1600
1602 return util.chunkbuffer(gengroup())
1601 return util.chunkbuffer(gengroup())
1603
1602
1604 def addchangegroup(self, source, srctype, url):
1603 def addchangegroup(self, source, srctype, url):
1605 """add changegroup to repo.
1604 """add changegroup to repo.
1606 returns number of heads modified or added + 1."""
1605 returns number of heads modified or added + 1."""
1607
1606
1608 def csmap(x):
1607 def csmap(x):
1609 self.ui.debug(_("add changeset %s\n") % short(x))
1608 self.ui.debug(_("add changeset %s\n") % short(x))
1610 return cl.count()
1609 return cl.count()
1611
1610
1612 def revmap(x):
1611 def revmap(x):
1613 return cl.rev(x)
1612 return cl.rev(x)
1614
1613
1615 if not source:
1614 if not source:
1616 return 0
1615 return 0
1617
1616
1618 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1617 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1619
1618
1620 changesets = files = revisions = 0
1619 changesets = files = revisions = 0
1621
1620
1622 tr = self.transaction()
1621 tr = self.transaction()
1623
1622
1624 # write changelog data to temp files so concurrent readers will not see
1623 # write changelog data to temp files so concurrent readers will not see
1625 # inconsistent view
1624 # inconsistent view
1626 cl = None
1625 cl = None
1627 try:
1626 try:
1628 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1627 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1629
1628
1630 oldheads = len(cl.heads())
1629 oldheads = len(cl.heads())
1631
1630
1632 # pull off the changeset group
1631 # pull off the changeset group
1633 self.ui.status(_("adding changesets\n"))
1632 self.ui.status(_("adding changesets\n"))
1634 cor = cl.count() - 1
1633 cor = cl.count() - 1
1635 chunkiter = changegroup.chunkiter(source)
1634 chunkiter = changegroup.chunkiter(source)
1636 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1635 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1637 raise util.Abort(_("received changelog group is empty"))
1636 raise util.Abort(_("received changelog group is empty"))
1638 cnr = cl.count() - 1
1637 cnr = cl.count() - 1
1639 changesets = cnr - cor
1638 changesets = cnr - cor
1640
1639
1641 # pull off the manifest group
1640 # pull off the manifest group
1642 self.ui.status(_("adding manifests\n"))
1641 self.ui.status(_("adding manifests\n"))
1643 chunkiter = changegroup.chunkiter(source)
1642 chunkiter = changegroup.chunkiter(source)
1644 # no need to check for empty manifest group here:
1643 # no need to check for empty manifest group here:
1645 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1644 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1646 # no new manifest will be created and the manifest group will
1645 # no new manifest will be created and the manifest group will
1647 # be empty during the pull
1646 # be empty during the pull
1648 self.manifest.addgroup(chunkiter, revmap, tr)
1647 self.manifest.addgroup(chunkiter, revmap, tr)
1649
1648
1650 # process the files
1649 # process the files
1651 self.ui.status(_("adding file changes\n"))
1650 self.ui.status(_("adding file changes\n"))
1652 while 1:
1651 while 1:
1653 f = changegroup.getchunk(source)
1652 f = changegroup.getchunk(source)
1654 if not f:
1653 if not f:
1655 break
1654 break
1656 self.ui.debug(_("adding %s revisions\n") % f)
1655 self.ui.debug(_("adding %s revisions\n") % f)
1657 fl = self.file(f)
1656 fl = self.file(f)
1658 o = fl.count()
1657 o = fl.count()
1659 chunkiter = changegroup.chunkiter(source)
1658 chunkiter = changegroup.chunkiter(source)
1660 if fl.addgroup(chunkiter, revmap, tr) is None:
1659 if fl.addgroup(chunkiter, revmap, tr) is None:
1661 raise util.Abort(_("received file revlog group is empty"))
1660 raise util.Abort(_("received file revlog group is empty"))
1662 revisions += fl.count() - o
1661 revisions += fl.count() - o
1663 files += 1
1662 files += 1
1664
1663
1665 cl.writedata()
1664 cl.writedata()
1666 finally:
1665 finally:
1667 if cl:
1666 if cl:
1668 cl.cleanup()
1667 cl.cleanup()
1669
1668
1670 # make changelog see real files again
1669 # make changelog see real files again
1671 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1670 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1672 self.changelog.checkinlinesize(tr)
1671 self.changelog.checkinlinesize(tr)
1673
1672
1674 newheads = len(self.changelog.heads())
1673 newheads = len(self.changelog.heads())
1675 heads = ""
1674 heads = ""
1676 if oldheads and newheads != oldheads:
1675 if oldheads and newheads != oldheads:
1677 heads = _(" (%+d heads)") % (newheads - oldheads)
1676 heads = _(" (%+d heads)") % (newheads - oldheads)
1678
1677
1679 self.ui.status(_("added %d changesets"
1678 self.ui.status(_("added %d changesets"
1680 " with %d changes to %d files%s\n")
1679 " with %d changes to %d files%s\n")
1681 % (changesets, revisions, files, heads))
1680 % (changesets, revisions, files, heads))
1682
1681
1683 if changesets > 0:
1682 if changesets > 0:
1684 self.hook('pretxnchangegroup', throw=True,
1683 self.hook('pretxnchangegroup', throw=True,
1685 node=hex(self.changelog.node(cor+1)), source=srctype,
1684 node=hex(self.changelog.node(cor+1)), source=srctype,
1686 url=url)
1685 url=url)
1687
1686
1688 tr.close()
1687 tr.close()
1689
1688
1690 if changesets > 0:
1689 if changesets > 0:
1691 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1690 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1692 source=srctype, url=url)
1691 source=srctype, url=url)
1693
1692
1694 for i in range(cor + 1, cnr + 1):
1693 for i in range(cor + 1, cnr + 1):
1695 self.hook("incoming", node=hex(self.changelog.node(i)),
1694 self.hook("incoming", node=hex(self.changelog.node(i)),
1696 source=srctype, url=url)
1695 source=srctype, url=url)
1697
1696
1698 return newheads - oldheads + 1
1697 return newheads - oldheads + 1
1699
1698
1700
1699
1701 def stream_in(self, remote):
1700 def stream_in(self, remote):
1702 fp = remote.stream_out()
1701 fp = remote.stream_out()
1703 resp = int(fp.readline())
1702 resp = int(fp.readline())
1704 if resp != 0:
1703 if resp != 0:
1705 raise util.Abort(_('operation forbidden by server'))
1704 raise util.Abort(_('operation forbidden by server'))
1706 self.ui.status(_('streaming all changes\n'))
1705 self.ui.status(_('streaming all changes\n'))
1707 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1706 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1708 self.ui.status(_('%d files to transfer, %s of data\n') %
1707 self.ui.status(_('%d files to transfer, %s of data\n') %
1709 (total_files, util.bytecount(total_bytes)))
1708 (total_files, util.bytecount(total_bytes)))
1710 start = time.time()
1709 start = time.time()
1711 for i in xrange(total_files):
1710 for i in xrange(total_files):
1712 name, size = fp.readline().split('\0', 1)
1711 name, size = fp.readline().split('\0', 1)
1713 size = int(size)
1712 size = int(size)
1714 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1713 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1715 ofp = self.opener(name, 'w')
1714 ofp = self.opener(name, 'w')
1716 for chunk in util.filechunkiter(fp, limit=size):
1715 for chunk in util.filechunkiter(fp, limit=size):
1717 ofp.write(chunk)
1716 ofp.write(chunk)
1718 ofp.close()
1717 ofp.close()
1719 elapsed = time.time() - start
1718 elapsed = time.time() - start
1720 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1719 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1721 (util.bytecount(total_bytes), elapsed,
1720 (util.bytecount(total_bytes), elapsed,
1722 util.bytecount(total_bytes / elapsed)))
1721 util.bytecount(total_bytes / elapsed)))
1723 self.reload()
1722 self.reload()
1724 return len(self.heads()) + 1
1723 return len(self.heads()) + 1
1725
1724
1726 def clone(self, remote, heads=[], stream=False):
1725 def clone(self, remote, heads=[], stream=False):
1727 '''clone remote repository.
1726 '''clone remote repository.
1728
1727
1729 keyword arguments:
1728 keyword arguments:
1730 heads: list of revs to clone (forces use of pull)
1729 heads: list of revs to clone (forces use of pull)
1731 stream: use streaming clone if possible'''
1730 stream: use streaming clone if possible'''
1732
1731
1733 # now, all clients that can request uncompressed clones can
1732 # now, all clients that can request uncompressed clones can
1734 # read repo formats supported by all servers that can serve
1733 # read repo formats supported by all servers that can serve
1735 # them.
1734 # them.
1736
1735
1737 # if revlog format changes, client will have to check version
1736 # if revlog format changes, client will have to check version
1738 # and format flags on "stream" capability, and use
1737 # and format flags on "stream" capability, and use
1739 # uncompressed only if compatible.
1738 # uncompressed only if compatible.
1740
1739
1741 if stream and not heads and remote.capable('stream'):
1740 if stream and not heads and remote.capable('stream'):
1742 return self.stream_in(remote)
1741 return self.stream_in(remote)
1743 return self.pull(remote, heads)
1742 return self.pull(remote, heads)
1744
1743
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that, when invoked, renames the journal files
    under directory *base* to their 'undo' counterparts.

    The path is captured in a plain local instead of closing over any
    repository object, so the callback holds no reference cycle."""
    journal_dir = base
    def a():
        util.rename(os.path.join(journal_dir, "journal"),
                    os.path.join(journal_dir, "undo"))
        util.rename(os.path.join(journal_dir, "journal.dirstate"),
                    os.path.join(journal_dir, "undo.dirstate"))
    return a
1753
1752
def instance(ui, path, create):
    """Factory entry point: open (or create) the local repository at
    *path*, stripping any leading 'file:' scheme first."""
    return localrepository(ui, util.drop_scheme('file', path), create)
1756
1755
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,199 +1,202
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from revlog import *
8 from revlog import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 demandload(globals(), "array bisect struct")
11 demandload(globals(), "array bisect struct")
12
12
class manifestdict(dict):
    """A manifest: maps file name -> nodeid, with a parallel table of
    per-file flags ('x' executable, 'l' symlink, '' plain)."""
    def __init__(self, mapping=None, flags=None):
        # None sentinels instead of mutable {} defaults: a shared default
        # dict would leak entries between manifestdict instances
        if mapping is None: mapping = {}
        if flags is None: flags = {}
        dict.__init__(self, mapping)
        self._flags = flags
    def flags(self, f):
        """return the flags string for file f ('' if none recorded)"""
        return self._flags.get(f, "")
    def execf(self, f):
        "test for executable in manifest flags"
        return "x" in self.flags(f)
    def linkf(self, f):
        "test for symlink in manifest flags"
        return "l" in self.flags(f)
    def rawset(self, f, entry):
        # entry is a raw manifest line tail: 40 hex chars of nodeid,
        # optional flags, trailing newline
        self[f] = bin(entry[:40])
        fl = entry[40:-1]
        if fl: self._flags[f] = fl
    def set(self, f, execf=False, linkf=False):
        # symlink wins over executable; always (re)set the flag so a
        # previously recorded flag cannot survive a plain-file update
        if linkf: self._flags[f] = "l"
        elif execf: self._flags[f] = "x"
        else: self._flags[f] = ""
    def copy(self):
        """independent copy: both the mapping and the flags table"""
        return manifestdict(dict.copy(self), dict.copy(self._flags))
34
37
class manifest(revlog):
    """Revlog storing one manifest (file -> nodeid+flags table) per
    changeset, with single-revision read/list caches."""
    def __init__(self, opener, defversion=REVLOGV0):
        # mapcache: (node, manifestdict) of the last revision read/added
        # listcache: raw manifest text of that revision as array('c')
        self.mapcache = None
        self.listcache = None
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
                        defversion)

    def read(self, node):
        """parse and return the manifestdict stored at *node*"""
        if node == nullid: return manifestdict() # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        self.listcache = array.array('c', text)
        lines = text.splitlines(1)
        mapping = manifestdict()
        for l in lines:
            # each line is 'filename\0<40 hex nodeid><flags>\n'
            (f, n) = l.split('\0')
            mapping.rawset(f, n)
        self.mapcache = (node, mapping)
        return mapping

    def diff(self, a, b):
        return mdiff.textdiff(str(a), str(b))

    def _search(self, m, s, lo=0, hi=None):
        '''return a tuple (start, end) that says where to find s within m.

        If the string is found m[start:end] are the line containing
        that string.  If start == end the string was not found and
        they indicate the proper sorted insertion point.  This was
        taken from bisect_left, and modified to find line start/end as
        it goes along.

        m should be a buffer or a string
        s is a string'''
        def advance(i, c):
            # index of the next occurrence of character c at or after i
            while i < lenm and m[i] != c:
                i += 1
            return i
        lenm = len(m)
        if not hi:
            hi = lenm
        while lo < hi:
            mid = (lo + hi) // 2
            # back up to the start of the line containing mid
            start = mid
            while start > 0 and m[start-1] != '\n':
                start -= 1
            end = advance(start, '\0')
            if m[start:end] < s:
                # we know that after the null there are 40 bytes of sha1
                # this translates to the bisect lo = mid + 1
                lo = advance(end + 40, '\n') + 1
            else:
                # this translates to the bisect hi = mid
                hi = start
        end = advance(lo, '\0')
        found = m[lo:end]
        if cmp(s, found) == 0:
            # we know that after the null there are 40 bytes of sha1
            end = advance(end + 40, '\n')
            return (lo, end+1)
        else:
            return (lo, lo)

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flag) pair if found, (None, None) if not.'''
        if self.mapcache and node == self.mapcache[0]:
            return self.mapcache[1].get(f), self.mapcache[1].flags(f)
        text = self.revision(node)
        start, end = self._search(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        return bin(n[:40]), n[40:-1] == 'x'

    def add(self, map, transaction, link, p1=None, p2=None,
            changed=None):
        """store manifestdict *map* as a new revision and return its node.

        changed, when given, is an (added/modified, removed) pair of
        filename lists; together with a valid listcache it allows an
        incremental delta instead of re-serializing the whole manifest."""
        # apply the changes collected during the bisect loop to our addlist
        # return a delta suitable for addrevision
        def addlistdelta(addlist, x):
            # start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(x)
            while i > 0:
                i -= 1
                start = x[i][0]
                end = x[i][1]
                if x[i][2]:
                    addlist[start:end] = array.array('c', x[i][2])
                else:
                    del addlist[start:end]
            # binary delta format: (start, end, length) header + new data
            return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
                            for d in x ])

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: serialize the whole manifest from scratch
            files = map.keys()
            files.sort()

            # if this is changed to support newlines in filenames,
            # be sure to check the templates/ dir again (especially *-raw.tmpl)
            text = ["%s\000%s%s\n" % (f, hex(map[f]), map.flags(f)) for f in files]
            self.listcache = array.array('c', "".join(text))
            cachedelta = None
        else:
            addlist = self.listcache

            # combine the changed lists into one list for sorting
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            dstart = None
            dend = None
            dline = [""]
            start = 0
            # zero copy representation of addlist as a buffer
            addbuf = buffer(addlist)

            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                start, end = self._search(addbuf, f, start)
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                else:
                    l = ""
                if start == end and w[1] == 1:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest\n") % f)
                if dstart != None and dstart <= start and dend >= start:
                    # adjacent/overlapping change: extend the current hunk
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart != None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart != None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the addlist, and get a delta for addrevision
            cachedelta = addlistdelta(addlist, delta)

            # the delta is only valid if we've been processing the tip revision
            if self.mapcache[0] != self.tip():
                cachedelta = None
            self.listcache = addlist

        n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
                             p2, cachedelta)
        self.mapcache = (n, map)

        return n
General Comments 0
You need to be logged in to leave comments. Login now