path: root/FBGM.py
import os
import sys

import docx  # third-party: python-docx

def html_file_generator(path, page_header, page_contents):
    # The output file takes its name from the source file's base name.
    filename = path[path.rfind('/') + 1: path.rfind('.')]

    # "w" mode overwrites any existing file of the same name; utf-8 matches the
    # charset declared in the generated <head>.
    with open(filename + ".html", "w", encoding="utf-8") as htmlfile:
        htmlfile.write('<!DOCTYPE html>'
                       '<html lang="en">'
                       '<head>'
                       '<title>' + filename + '</title>'
                       '<link rel="stylesheet" type="text/css" href="stylesheet.css">'
                       '<link rel="icon" type="image/x-icon" href="images/favicon.ico">'
                       '<meta charset="utf-8"/>'
                       '</head>'
                       '<body>'
                       '<h1>' + page_header + '</h1>')
        htmlfile.write(page_contents)
        htmlfile.write('</body>'
                       '</html>')


#Text file handler
def paragraph_parser(contents):
    # Characters that may legitimately end a sentence, and the terminator
    # sequences that mark the end of a paragraph. "|E" is an artificial end
    # marker appended below so the final paragraph is always terminated.
    punctuation = [".", "?", "!", '"']
    paragraph_end = [".\n\n", "?\n\n", "!\n\n", '"\n\n', "|E"]
    article_end = '\n'
    paragraph_index = [0]
    article_index = [0]
    paragraph_number = 1
    paragraph = []
    article = []
    contents = contents.strip()
    contents += "|E\n"

    # Keep only the terminators that actually occur in the text.
    paragraph_end = [end for end in paragraph_end if contents.find(end) != -1]

    # Split the text into paragraphs at the earliest remaining terminator.
    while paragraph_end:
        start = paragraph_index[paragraph_number - 1]
        minimum = contents[start:].index(paragraph_end[0]) + start
        for end in paragraph_end:
            position = contents[start:].index(end) + start
            if position < minimum:
                minimum = position
        paragraph_index.append(minimum + 1)
        paragraph.append(contents[start:paragraph_index[paragraph_number]] + '</p>')

        # Drop terminators that no longer appear in the remaining text
        # (iterate over a copy so removals do not skip elements).
        for end in paragraph_end[:]:
            if contents[paragraph_index[paragraph_number]:].find(end) == -1:
                paragraph_end.remove(end)

        # A line break that is not preceded by sentence punctuation marks the
        # start of a new article header.
        first_nl = contents[start + 2:paragraph_index[paragraph_number]].index(article_end) + start + 2
        if contents[first_nl - 1:first_nl] not in punctuation:
            article_index.append(first_nl - 2)
        paragraph_number += 1

    # Recover the header line of text for each detected article boundary.
    for x in range(1, len(article_index)):
        if x == 1 or contents[article_index[x - 1]:article_index[x] - 2].rfind('\n') != -1:
            article.append(contents[contents[:article_index[x]].rfind('\n') + 1:article_index[x] + 2])

    # Wrap paragraphs in <p> tags and promote each header line to an <h2>.
    paragraph_number = 0
    for x in article_index[1:]:
        while x not in range(paragraph_index[paragraph_number], paragraph_index[paragraph_number + 1]):
            paragraph[paragraph_number] = "<p>" + paragraph[paragraph_number]
            paragraph_number += 1
        paragraph[paragraph_number] = paragraph[paragraph_number].replace("\n", " ").strip()
        paragraph[paragraph_number] = ("<h2>" + article[article_index.index(x) - 1] + "</h2>\n<p>"
                                       + paragraph[paragraph_number][len(article[article_index.index(x) - 1]):])
        print("Header " + str(x) + " is in paragraph " + str(paragraph_number))

    # Strip the artificial '|' end marker from the final paragraph while
    # keeping its closing </p> tag.
    paragraph[-1] = paragraph[-1][:-5] + paragraph[-1][-4:]
    return ''.join(paragraph)


#Document file handler
def dochandler(path):
    doc = docx.Document(path)
    header_styles = ["Subtitle", "Heading 1", "Heading 2"]
    paragraph_styles = ["Normal", "No Spacing"]
    # Default the page title to the file's base name, but prefer an explicit
    # Title paragraph if the document starts with one.
    title = (path[path.rfind('/') + 1: path.rfind('.')]).capitalize()
    if doc.paragraphs[0].style.name == "Title":
        title = doc.paragraphs[0].text
    fullText = ""
    for para in doc.paragraphs:
        if para.style.name in header_styles:
            fullText += "<h2>" + para.text + "</h2>\n"
        elif para.style.name in paragraph_styles:
            fullText += "<p>" + para.text + "</p>\n"
        print(para.text, para.style.name)  # debug output
    html_file_generator(path, title, fullText)
    return 0


def txthandler(path):
    with open(path, "r") as f:
        contents = f.read()
    # The first line is used as the page title; the remainder is the body.
    main_header = contents[0:contents.find('\n')]
    paragraphs = paragraph_parser(contents[contents.find('\n'):])
    html_file_generator(path, main_header, paragraphs)
    return 0

def pdfhandler(path):
    # Placeholder: PDFs are binary, so dumping the raw bytes does not recover
    # readable paragraphs; a real text extractor is still needed here.
    with open(path, "rb") as f:
        print(f.read())
    return 0
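
# A possible fuller PDF handler, sketched under the assumption that the
# third-party pypdf package is installed (it is not a dependency of this
# script today). The name pdfhandler_pypdf is hypothetical; it simply feeds
# the extracted text through the same pipeline used for .txt files.
def pdfhandler_pypdf(path):
    from pypdf import PdfReader  # imported lazily so the script still runs without pypdf
    reader = PdfReader(path)
    # Join page texts with blank lines so paragraph_parser can find paragraph breaks.
    contents = "\n\n".join((page.extract_text() or "") for page in reader.pages)
    main_header = contents[0:contents.find('\n')]
    paragraphs = paragraph_parser(contents[contents.find('\n'):])
    html_file_generator(path, main_header, paragraphs)
    return 0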
    


#Input file to convert (currently hard-coded); swap the path to test other files
#path = 'C:/Code/texttohtml/ThisOne.docx'
path = 'C:/Users/Josh/Desktop/porkandbeans/texttohtml/memes.docx'
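
# Optionally take the input file from the command line instead of editing the
# path above (a sketch, assuming the script is run as "python FBGM.py <file>");
# the hard-coded path stays in effect when no argument is given.
if len(sys.argv) > 1:
    path = sys.argv[1]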

extension = path[path.rfind('.') + 1:]


if extension == 'txt':
    txthandler(path)

elif extension == 'pdf':
    pdfhandler(path)

elif extension in ('doc', 'docx'):
    dochandler(path)

else:
    print("Extension not recognized")
    sys.exit()