summaryrefslogtreecommitdiff
path: root/modules/telegraf/telegraf_utils.py
blob: 2e72fbf6be0ab456a20620b2392d127b471ecfe9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import toml

def modify_input(new_pipeline_path, key, value):
    """Set *key* to *value* on the first ``[[inputs.http]]`` plugin table
    of the Telegraf pipeline config at *new_pipeline_path*, rewriting the
    file in place.

    If *key* is not already present in the plugin table, the function is a
    no-op and the file is left untouched (``toml.dump`` re-serializes the
    whole file and drops comments/formatting, so we avoid rewriting when
    nothing changed).

    Raises KeyError if the file has no ``[[inputs.http]]`` section.
    """
    data = toml.load(new_pipeline_path)
    plugin = data["inputs"]["http"][0]  # first http input plugin table

    if key not in plugin:
        return  # unknown key: nothing to change, skip the destructive rewrite

    plugin[key] = value

    with open(new_pipeline_path, "w") as f:
        toml.dump(data, f)


##modify_input("templates/basic_ETL.toml", "test_pipers.toml, "urls", ["stillTesting"])

def modify_agent(new_pipeline_path, key, value):
    """Set *key* to *value* in the ``[agent]`` table of the Telegraf
    pipeline config at *new_pipeline_path*, rewriting the file in place.

    If *key* is not already present in the agent table, the function is a
    no-op and the file is left untouched (``toml.dump`` re-serializes the
    whole file and drops comments/formatting, so we avoid rewriting when
    nothing changed).

    Raises KeyError if the file has no ``[agent]`` section.
    """
    data = toml.load(new_pipeline_path)
    agent = data["agent"]

    if key not in agent:
        return  # unknown key: nothing to change, skip the destructive rewrite

    agent[key] = value

    with open(new_pipeline_path, "w") as f:
        toml.dump(data, f)


def modify_output(new_pipeline_path, key, value):
    """Set *key* to *value* on the first ``[[outputs.influxdb]]`` plugin
    table of the Telegraf pipeline config at *new_pipeline_path*,
    rewriting the file in place.

    If *key* is not already present in the plugin table, the function is a
    no-op and the file is left untouched (``toml.dump`` re-serializes the
    whole file and drops comments/formatting, so we avoid rewriting when
    nothing changed).

    Raises KeyError if the file has no ``[[outputs.influxdb]]`` section.
    """
    data = toml.load(new_pipeline_path)
    plugin = data["outputs"]["influxdb"][0]  # first influxdb output plugin table

    if key not in plugin:
        return  # unknown key: nothing to change, skip the destructive rewrite

    plugin[key] = value

    with open(new_pipeline_path, "w") as f:
        toml.dump(data, f)




### different_jsonPaths_ETL template funcs ###


#def modify_processorsConventer(new_pipeline_path, key, value):
#    data = toml.load(new_pipeline_path)
#    #print(data)
#    pluggin = data["processors"]["converter"][0]["fields"]
#    print(pluggin)
#
#    if key in pluggin:
#        pluggin[key] = value
#    with open(new_pipeline_path, "w") as f:
#        toml.dump(data, f)
#
#
#def modify_processorsRename(new_pipeline_path, key, value):
#    data = toml.load(new_pipeline_path)
#    pluggin = data["processors"]["rename"][0]["replace"][0]
#    print(pluggin)
#    pluggin = data["processors"]["rename"][0]["replace"][1]
#    print(pluggin)
#
#    if key in pluggin:
#        pluggin[key] = value
#    with open(new_pipeline_path, "w") as f:
#        toml.dump(data, f)
#








### ChatGPT was used in the procesess of creating this function
##   def add_new_replace_block(new_pipeline_name):
##   
##   new_block = """  [[processors.rename.replace]]
##       field = "placeholder"
##       dest = "placeholder"
##   """
##   
##   with open(new_pipeline_name, "r") as file:
##       lines = file.readlines()
##   
##   # Find the last occurrence of '[[processors.rename.replace]]'
##   insert_index = -1
##   for i, line in enumerate(lines):
##       if line.strip().startswith("[[processors.rename.replace]]"):
##           insert_index = i
##   
##   while insert_index + 1 < len(lines) and lines[insert_index + 1].startswith(" "):
##       insert_index += 1
##   
##   # Insert the new block
##   lines.insert(insert_index + 1, new_block + "\n")
##   
##   with open(new_pipeline_name, "w") as file:
##       file.writelines(lines)
##